Full Code of IBM/pytorchpipe for AI

develop 9cb172716660 cached
278 files
1.2 MB
305.4k tokens
655 symbols
1 requests
Download .txt
Showing preview only (1,363K chars total). Download the full file or copy to clipboard to get everything.
Repository: IBM/pytorchpipe
Branch: develop
Commit: 9cb172716660
Files: 278
Total size: 1.2 MB

Directory structure:
gitextract_zxl7rewp/

├── .coveralls.yml
├── .gitignore
├── .lgtm.yml
├── .travis.yml
├── LICENSE
├── README.md
├── configs/
│   ├── cifar100/
│   │   ├── cifar100_classification_convnet_softmax.yml
│   │   └── default_cifar100.yml
│   ├── clevr/
│   │   ├── clevr_all_vgg_glove_lstm_concat_ffn.yml
│   │   ├── clevr_image_convnet_ffn.yml
│   │   ├── clevr_question_glove_lstm.yml
│   │   └── default_clevr.yml
│   ├── default/
│   │   ├── components/
│   │   │   ├── language/
│   │   │   │   ├── bow_encoder.yml
│   │   │   │   ├── label_indexer.yml
│   │   │   │   ├── sentence_indexer.yml
│   │   │   │   ├── sentence_one_hot_encoder.yml
│   │   │   │   ├── sentence_tokenizer.yml
│   │   │   │   └── word_decoder.yml
│   │   │   ├── losses/
│   │   │   │   └── nll_loss.yml
│   │   │   ├── masking/
│   │   │   │   ├── join_masked_predictions.yml
│   │   │   │   └── string_to_mask.yml
│   │   │   ├── models/
│   │   │   │   ├── general_usage/
│   │   │   │   │   ├── attention_decoder.yml
│   │   │   │   │   ├── feed_forward_network.yml
│   │   │   │   │   ├── recurrent_neural_network.yml
│   │   │   │   │   └── seq2seq.yml
│   │   │   │   ├── language/
│   │   │   │   │   ├── index_embeddings.yml
│   │   │   │   │   └── sentence_embeddings.yml
│   │   │   │   ├── multi_modal_reasoning/
│   │   │   │   │   ├── compact_bilinear_pooling.yml
│   │   │   │   │   ├── factorized_bilinear_pooling.yml
│   │   │   │   │   ├── low_rank_bilinear_pooling.yml
│   │   │   │   │   ├── question_driven_attention.yml
│   │   │   │   │   ├── relational_network.yml
│   │   │   │   │   └── self_attention.yml
│   │   │   │   └── vision/
│   │   │   │       ├── convnet_encoder.yml
│   │   │   │       ├── generic_image_encoder.yml
│   │   │   │       └── lenet5.yml
│   │   │   ├── publishers/
│   │   │   │   ├── global_variable_publisher.yml
│   │   │   │   └── stream_file_exporter.yml
│   │   │   ├── statistics/
│   │   │   │   ├── accuracy_statistics.yml
│   │   │   │   ├── batch_size_statistics.yml
│   │   │   │   ├── bleu_statistics.yml
│   │   │   │   └── precision_recall_statistics.yml
│   │   │   ├── tasks/
│   │   │   │   ├── image_text_to_class/
│   │   │   │   │   ├── clevr.yml
│   │   │   │   │   ├── gqa.yml
│   │   │   │   │   └── vqa_med_2019.yml
│   │   │   │   ├── image_to_class/
│   │   │   │   │   ├── cifar_100.yml
│   │   │   │   │   ├── mnist.yml
│   │   │   │   │   └── simple_molecules.yml
│   │   │   │   ├── text_to_class/
│   │   │   │   │   ├── dummy_language_identification.yml
│   │   │   │   │   ├── wily_language_identification.yml
│   │   │   │   │   └── wily_ngram_language_modeling.yml
│   │   │   │   └── text_to_text/
│   │   │   │       ├── translation_pairs.yml
│   │   │   │       └── wikitext_language_modeling.yml
│   │   │   ├── transforms/
│   │   │   │   ├── concatenate_tensor.yml
│   │   │   │   ├── list_to_tensor.yml
│   │   │   │   ├── non_linearity.yml
│   │   │   │   ├── reduce_tensor.yml
│   │   │   │   └── reshape_tensor.yml
│   │   │   └── viewers/
│   │   │       ├── image_viewer.yml
│   │   │       └── stream_viewer.yml
│   │   └── workers/
│   │       ├── offline_trainer.yml
│   │       ├── online_trainer.yml
│   │       └── processor.yml
│   ├── mnist/
│   │   ├── default_mnist.yml
│   │   ├── mnist_classification_convnet_softmax.yml
│   │   ├── mnist_classification_kfold_softmax.yml
│   │   ├── mnist_classification_lenet5.yml
│   │   ├── mnist_classification_softmax.yml
│   │   ├── mnist_classification_vf_2lenet5_2losses.yml
│   │   └── mnist_classification_vf_shared_convnet_2softmaxes_2losses.yml
│   ├── molecule_classification/
│   │   ├── default_molecule_classification.yml
│   │   ├── molecule_classification_convnet_softmax.yml
│   │   └── molecule_classification_vgg16_molecules.yml
│   ├── translation/
│   │   └── eng_fra_translation_enc_attndec.yml
│   ├── tutorials/
│   │   └── mnist_classification_convnet_softmax.yml
│   ├── vqa_med_2019/
│   │   ├── c1_classification/
│   │   │   ├── c1_classification_all_bow_vgg16_concat.yml
│   │   │   ├── c1_classification_all_rnn_vgg16_concat.yml
│   │   │   ├── c1_classification_image_cnn_softmax.yml
│   │   │   ├── c1_classification_image_size_softmax.yml
│   │   │   ├── c1_classification_question_mimic_rnn.yml
│   │   │   ├── c1_classification_question_onehot_bow.yml
│   │   │   ├── c1_classification_question_rnn.yml
│   │   │   ├── c1_classification_vf_question_rnn_separate_q_categorization.yml
│   │   │   └── default_c1_classification.yml
│   │   ├── c2_classification/
│   │   │   ├── c2_class_lstm_resnet152_ewm_cat_is.yml
│   │   │   ├── c2_class_lstm_resnet152_rn_cat_is.yml
│   │   │   ├── c2_class_lstm_resnet50_attn_cat_is.yml
│   │   │   ├── c2_class_lstm_resnet50_coattn_mfb_cat_is.yml
│   │   │   ├── c2_class_lstm_resnet50_ewm_cat_is.yml
│   │   │   ├── c2_class_lstm_resnet50_mfb_cat_is.yml
│   │   │   ├── c2_class_lstm_resnet50_rn_cat_is.yml
│   │   │   ├── c2_class_lstm_selfattn.yml
│   │   │   ├── c2_class_lstm_vgg16_rn.yml
│   │   │   ├── c2_class_lstm_vgg16_rn_cat_is.yml
│   │   │   ├── c2_classification_all_rnn_vgg16_concat.yml
│   │   │   ├── c2_classification_all_rnn_vgg16_ewm.yml
│   │   │   ├── c2_classification_all_rnn_vgg16_ewm_size.yml
│   │   │   ├── c2_classification_all_rnn_vgg16_mcb.yml
│   │   │   ├── c2_word_answer_onehot_bow.yml
│   │   │   └── default_c2_classification.yml
│   │   ├── c3_classification/
│   │   │   ├── c3_classification_all_bow_vgg16_concat.yml
│   │   │   ├── c3_classification_all_concat.yml
│   │   │   ├── c3_classification_all_rnn_vgg16_concat.yml
│   │   │   ├── c3_classification_image_cnn_softmax.yml
│   │   │   ├── c3_classification_image_plus_size_concat.yml
│   │   │   ├── c3_classification_image_size_softmax.yml
│   │   │   ├── c3_classification_image_softmax.yml
│   │   │   ├── c3_classification_image_vgg16_softmax.yml
│   │   │   ├── c3_classification_question_onehot_bow.yml
│   │   │   ├── c3_classification_question_rnn.yml
│   │   │   └── default_c3_classification.yml
│   │   ├── c4_classification/
│   │   │   ├── c4_classification_all_rnn_vgg16_ewm_size.yml
│   │   │   ├── c4_enc_attndec.yml
│   │   │   ├── c4_enc_attndec_resnet152_ewm_cat_is.yml
│   │   │   ├── c4_frozen_if_gru_dec.yml
│   │   │   ├── c4_word_answer_glove_sum.yml
│   │   │   ├── c4_word_answer_mimic_sum.yml
│   │   │   ├── c4_word_answer_onehot_bow.yml
│   │   │   ├── c4_word_answer_onehot_sum.yml
│   │   │   └── default_c4_classification.yml
│   │   ├── default_vqa_med_2019.yml
│   │   ├── evaluation/
│   │   │   ├── deepta/
│   │   │   │   ├── glove_gru_resnet50_coattn_mfb_is_cat_ffn_c123_loss.yml
│   │   │   │   └── glove_gru_vgg16_coattn_mfb_is_cat_ffn_c1234_loss.yml
│   │   │   ├── example_mimic_lstm_vgg16_ewm_is_cat_ffn_c123_loss.yml
│   │   │   ├── frozen_if_ffn_c1234_loss.yml
│   │   │   ├── frozen_if_ffn_c123_loss.yml
│   │   │   ├── frozen_if_vf_5ffn_c1234yn_5losses.yml
│   │   │   ├── frozen_if_vf_5ffn_support_c1234yn_5losses.yml
│   │   │   └── tom/
│   │   │       ├── glove_lstm_resnet152_att_is_cat_ffn_c123_loss.yml
│   │   │       ├── glove_lstm_resnet152_mcb_is_cat_ffn_c123_loss.yml
│   │   │       ├── glove_lstm_vgg16_att_is_cat_ffn_c123_loss.yml
│   │   │       ├── glove_lstm_vgg16_ewm_is_cat_ffn_c123_loss.yml
│   │   │       └── glove_lstm_vgg16_mcb_is_cat_ffn_c123_loss.yml
│   │   ├── extend_answers.yml
│   │   ├── extend_answers_c4.yml
│   │   ├── frozen_pipelines/
│   │   │   ├── frozen_input_fusion_glove_lstm_vgg_att_is_cat.yml
│   │   │   ├── frozen_question_categorization_glove_rnn_ffn.yml
│   │   │   ├── frozen_word_answer_glove_sum.yml
│   │   │   └── input_fusion_processor_io.yml
│   │   ├── question_categorization/
│   │   │   ├── default_question_categorization.yml
│   │   │   ├── question_categorization_onehot_bow.yml
│   │   │   ├── question_categorization_onehot_rnn.yml
│   │   │   ├── question_categorization_rnn.yml
│   │   │   └── question_categorization_rnn_ffn.yml
│   │   └── vf/
│   │       ├── c1_binary_vf_cat_hard_shared_question_rnn_two_ffns_losses.yml
│   │       ├── c1_binary_vf_cat_rnn_shared_all_encoders_two_ffns_losses.yml
│   │       ├── c1_binary_vf_cat_rnn_shared_question_rnn_two_ffns_losses.yml
│   │       ├── c1_c2_c3_binary_vf_cat_rnn_shared_all_encoders_four_ffns_losses.yml
│   │       ├── c1_c3_binary_vf_cat_rnn_shared_all_encoders_three_ffns_losses.yml
│   │       ├── lstm_resnet152_is_cat_ffn_c123_no_binary_loss.yml
│   │       ├── lstm_resnet50_ewm_is_cat_ffn_c123_loss_ffn_yn_loss.yml
│   │       ├── lstm_resnet50_ewm_is_cat_ffn_c123_no_binary_loss.yml
│   │       ├── lstm_resnet50_is_cat_ffn_c123_no_binary_loss.yml
│   │       ├── lstm_vgg16_is_cat_ffn_c123_binary_yn_loss.yml
│   │       ├── lstm_vgg16_is_cat_ffn_c123_no_yn_loss.yml
│   │       └── lstm_vgg16_is_cat_ffn_only_yn_loss.yml
│   ├── wikitext/
│   │   ├── wikitext_language_modeling_encoder_attndecoder.yml
│   │   ├── wikitext_language_modeling_rnn.yml
│   │   ├── wikitext_language_modeling_seq2seq.yml
│   │   └── wikitext_language_modeling_seq2seq_simple.yml
│   └── wily/
│       ├── dummy_language_identification_bow.yml
│       ├── wily_language_identification_bow.yml
│       └── wily_ngram_language_modeling.yml
├── ptp/
│   ├── __init__.py
│   ├── application/
│   │   ├── __init__.py
│   │   ├── component_factory.py
│   │   ├── pipeline_manager.py
│   │   ├── sampler_factory.py
│   │   └── task_manager.py
│   ├── components/
│   │   ├── component.py
│   │   ├── language/
│   │   │   ├── __init__.py
│   │   │   ├── bow_encoder.py
│   │   │   ├── label_indexer.py
│   │   │   ├── sentence_indexer.py
│   │   │   ├── sentence_one_hot_encoder.py
│   │   │   ├── sentence_tokenizer.py
│   │   │   └── word_decoder.py
│   │   ├── losses/
│   │   │   ├── __init__.py
│   │   │   ├── loss.py
│   │   │   └── nll_loss.py
│   │   ├── masking/
│   │   │   ├── __init__.py
│   │   │   ├── join_masked_predictions.py
│   │   │   └── string_to_mask.py
│   │   ├── mixins/
│   │   │   ├── embeddings.py
│   │   │   ├── io.py
│   │   │   └── word_mappings.py
│   │   ├── models/
│   │   │   ├── __init__.py
│   │   │   ├── general_usage/
│   │   │   │   ├── attention_decoder.py
│   │   │   │   ├── feed_forward_network.py
│   │   │   │   ├── recurrent_neural_network.py
│   │   │   │   └── seq2seq.py
│   │   │   ├── language/
│   │   │   │   ├── index_embeddings.py
│   │   │   │   └── sentence_embeddings.py
│   │   │   ├── model.py
│   │   │   ├── multi_modal_reasoning/
│   │   │   │   ├── compact_bilinear_pooling.py
│   │   │   │   ├── factorized_bilinear_pooling.py
│   │   │   │   ├── low_rank_bilinear_pooling.py
│   │   │   │   ├── question_driven_attention.py
│   │   │   │   ├── relational_network.py
│   │   │   │   └── self_attention.py
│   │   │   └── vision/
│   │   │       ├── convnet_encoder.py
│   │   │       ├── generic_image_encoder.py
│   │   │       └── lenet5.py
│   │   ├── publishers/
│   │   │   ├── __init__.py
│   │   │   ├── global_variable_publisher.py
│   │   │   └── stream_file_exporter.py
│   │   ├── statistics/
│   │   │   ├── __init__.py
│   │   │   ├── accuracy_statistics.py
│   │   │   ├── batch_size_statistics.py
│   │   │   ├── bleu_statistics.py
│   │   │   └── precision_recall_statistics.py
│   │   ├── tasks/
│   │   │   ├── image_text_to_class/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── clevr.py
│   │   │   │   ├── gqa.py
│   │   │   │   └── vqa_med_2019.py
│   │   │   ├── image_to_class/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── cifar_100.py
│   │   │   │   ├── mnist.py
│   │   │   │   └── simple_molecules.py
│   │   │   ├── task.py
│   │   │   ├── text_to_class/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── dummy_language_identification.py
│   │   │   │   ├── language_identification.py
│   │   │   │   ├── wily_language_identification.py
│   │   │   │   └── wily_ngram_language_modeling.py
│   │   │   └── text_to_text/
│   │   │       ├── __init__.py
│   │   │       ├── translation_pairs.py
│   │   │       └── wikitext_language_modeling.py
│   │   ├── transforms/
│   │   │   ├── __init__.py
│   │   │   ├── concatenate_tensor.py
│   │   │   ├── list_to_tensor.py
│   │   │   ├── reduce_tensor.py
│   │   │   └── reshape_tensor.py
│   │   └── viewers/
│   │       ├── __init__.py
│   │       ├── image_viewer.py
│   │       └── stream_viewer.py
│   ├── configuration/
│   │   ├── __init__.py
│   │   ├── config_interface.py
│   │   ├── config_parsing.py
│   │   ├── config_registry.py
│   │   └── configuration_error.py
│   ├── data_types/
│   │   ├── __init__.py
│   │   ├── data_definition.py
│   │   └── data_streams.py
│   ├── utils/
│   │   ├── __init__.py
│   │   ├── app_state.py
│   │   ├── data_streams_parallel.py
│   │   ├── globals_facade.py
│   │   ├── key_mappings_facade.py
│   │   ├── logger.py
│   │   ├── samplers.py
│   │   ├── singleton.py
│   │   ├── statistics_aggregator.py
│   │   ├── statistics_collector.py
│   │   └── termination_condition.py
│   └── workers/
│       ├── __init__.py
│       ├── offline_trainer.py
│       ├── online_trainer.py
│       ├── processor.py
│       ├── test_data_dict_parallel.py
│       ├── trainer.py
│       └── worker.py
├── setup.py
└── tests/
    ├── __init__.py
    ├── application/
    │   ├── pipeline_tests.py
    │   ├── sampler_factory_tests.py
    │   └── samplers_tests.py
    ├── components/
    │   ├── component_tests.py
    │   └── tasks/
    │       ├── clevr_tests.py
    │       ├── gqa_tests.py
    │       └── task_tests.py
    ├── configuration/
    │   ├── config_interface_tests.py
    │   ├── config_registry_tests.py
    │   └── handshaking_tests.py
    ├── data_types/
    │   ├── data_definition_tests.py
    │   └── data_streams_tests.py
    └── utils/
        ├── app_state_tests.py
        └── statistics_tests.py

================================================
FILE CONTENTS
================================================

================================================
FILE: .coveralls.yml
================================================
service_name: travis-ci

================================================
FILE: .gitignore
================================================
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
#  Usually these files are written by a python script from a template
#  before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# pyenv
.python-version

# celery beat schedule file
celerybeat-schedule

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/

# vscode
.vscode/

================================================
FILE: .lgtm.yml
================================================
extraction:
  python:
    python_setup:
      version: 3
    index:
      exclude:
        - .git
    #after_prepare:
    #  - python3 -m pip install --upgrade --user flake8
    #before_index:
    #  - python3 -m flake8 --version  # flake8 3.6.0 on CPython 3.6.5 on Linux
    #  # stop the build if there are Python syntax errors or undefined names
    #  - python3 -m flake8 . --count --select=E901,E999,F821,F822,F823 --show-source --statistics
    #  # exit-zero treats all errors as warnings.  The GitHub editor is 127 chars wide
    #  - python3 -m flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics


================================================
FILE: .travis.yml
================================================
# Copyright (C) tkornuta, IBM Corporation 2019
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

language: python
python: 3.6

# Safelist: focus Travis' attention on the master and develop branches only.
branches:
  only:
    - master
    - develop

#install:
#  - pip3 install -r requirements.txt

before_install:
  - sudo apt-get update
  # Install conda.
  - wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh;
  - bash miniconda.sh -b -p $HOME/miniconda
  - export PATH="$HOME/miniconda/bin:$PATH"
  # Set conda to always "--yes" mode.
  - conda config --set always_yes yes --set changeps1 no
  - conda update -q conda
  # Create env and install pytorch
  - conda create -q -n test-environment python=$TRAVIS_PYTHON_VERSION pytorch scipy -c pytorch
  - source activate test-environment
  # Coveralls
  - pip install coveralls

install:
  - python setup.py install

script:
  # Test flake8 compatibility.
  #- python3 -m flake8 --version  # flake8 3.6.0 on CPython 3.6.5 on Linux
  # stop the build if there are Python syntax errors or undefined names
  #- python3 -m flake8 . --count --select=E901,E999,F821,F822,F823 --show-source --statistics
  # exit-zero treats all errors as warnings.  The GitHub editor is 127 chars wide
  #- python3 -m flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
  # Run unittests in ptp/test.
  #- python -m unittest ptp
  - coverage run -m unittest #discover -s ptp
  # Build documentation.
  # TODO.

after_success:
  # Coveralls.
  - coveralls

================================================
FILE: LICENSE
================================================
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!)  The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.


================================================
FILE: README.md
================================================
# PyTorchPipe

![Language](https://img.shields.io/badge/language-Python-blue.svg)
[![GitHub license](https://img.shields.io/github/license/IBM/pytorchpipe.svg)](https://github.com/IBM/pytorchpipe/blob/develop/LICENSE)
[![GitHub version](https://badge.fury.io/gh/IBM%2Fpytorchpipe.svg)](https://badge.fury.io/gh/IBM%2Fpytorchpipe)

[![Build Status](https://travis-ci.com/IBM/pytorchpipe.svg?branch=develop)](https://travis-ci.com/IBM/pytorchpipe)
[![Language grade: Python](https://img.shields.io/lgtm/grade/python/g/IBM/pytorchpipe.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/IBM/pytorchpipe/context:python)
[![Total alerts](https://img.shields.io/lgtm/alerts/g/IBM/pytorchpipe.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/IBM/pytorchpipe/alerts/)
[![Coverage Status](https://coveralls.io/repos/github/IBM/pytorchpipe/badge.svg?branch=develop)](https://coveralls.io/github/IBM/pytorchpipe?branch=develop)
[![Maintainability](https://api.codeclimate.com/v1/badges/e8d37123b856ee5bb10b/maintainability)](https://codeclimate.com/github/IBM/pytorchpipe/maintainability)

## Description

PyTorchPipe (PTP) is a component-oriented framework that facilitates development of computational _multi-modal pipelines_ and comparison of diverse neural network-based models.

PTP frames training and testing procedures as _pipelines_ consisting of many components communicating through data streams.
Each such pipeline can consist of several components, including one task instance (providing batches of data), any number of trainable components (models) and additional components providing required transformations and computations.


![Alt text](docs/source/img/data_flow_vqa_5_attention_gpu_loaders.png?raw=true "Exemplary multi-modal data flow diagram")


As a result, the training & testing procedures are no longer pinned to a specific task or model, and built-in mechanisms for compatibility checking (handshaking), configuration and global variables management & statistics collection facilitate rapid development of complex pipelines and running diverse experiments.

In its core, to _accelerate the computations_ on their own, PTP relies on PyTorch and extensively uses its mechanisms for distribution of computations on CPUs/GPUs, including multi-process data loaders and multi-GPU data parallelism.
The models are _agnostic_ to those operations and one indicates whether to use them in configuration files (data loaders) or by passing adequate argument (--gpu) at run-time.

Please refer to the  [tutorial presentation](https://zenodo.org/record/3269928) for more details.

**Datasets:**
PTP focuses on multi-modal reasoning combining vision and language. Currently it offers _Tasks_ built around the following datasets, categorized into three domains:

![Alt text](docs/source/img/components/ptp_tasks.png?raw=true)

Aside from providing batches of samples, the Task class will automatically download the files associated with a given dataset (as long as the dataset is publicly available).
The diversity of those tasks (and the associated models) proves the flexibility of the framework.
We are constantly working on incorporation of new Tasks into PTP.

**Pipelines:**
What people typically define as a _model_ in PTP is framed as a _pipeline_, consisting of many inter-connected components, with one or more _Models_ containing trainable elements.
Those components are loosely coupled and care only about the _input streams_ they retrieve and _output streams_ they produce.
The framework offers full flexibility and it is up to the programmer to choose the _granularity_ of his/her components/models/pipelines.
Such a decomposition enables one to easily combine many components and models into pipelines, whereas the framework supports loading of pretrained models, freezing during training, saving them to checkpoints etc.

**Model/Component Zoo:**
PTP provides several ready to use, out of the box models and other, non-trainable (but parametrizable) components.


![Alt text](docs/source/img/components/ptp_models.png?raw=true)

The model zoo includes several general usage components, such as:
  * Feed Forward Network (variable number of Fully Connected layers with activation functions and dropout)
  * Recurrent Neural Network (different cell types with activation functions and dropout, a single model can work both as encoder or decoder)

It also includes a few models specific to a given domain, but still quite general:
  * Convnet Encoder (CNNs with ReLU and MaxPooling, can work with different sizes of images)
  * General Image Encoder (wrapping several models from Torch Vision)
  * Sentence Embeddings (encoding words using the embedding layer)

There are also some classical baselines both for vision like LeNet-5 or language domains, e.g. Seq2Seq (Sequence to Sequence model) or Attention Decoder (RNN-based decoder implementing Bahdanau-style attention).
PTP also offers several models useful for multi-modal fusion and reasoning.

![Alt text](docs/source/img/components/ptp_components_others.png?raw=true)

The framework also offers components useful when working with language, vision or other types of streams (e.g. tensor transformations).
There are also several general-purpose components, from components calculating losses and statistics to publishers and viewers.

**Workers:**
PTP workers are python scripts that are _agnostic_ to the tasks/models/pipelines that they are supposed to work with.
Currently the framework offers three workers:

  * ptp-offline-trainer (a trainer relying on classical methodology interlacing training and validation at the end of every epoch, creates separate instances of training and validation tasks and trains the models by feeding the created pipeline with batches of data, relying on the notion of an _epoch_)

  * ptp-online-trainer (a flexible trainer creating separate instances of training and validation tasks and training the models by feeding the created pipeline with batches of data, relying on the notion of an _episode_)

  * ptp-processor (performing one pass over all the samples returned by a given task instance, useful for collecting scores on test set, answers for submissions to competitions etc.)


## Installation

PTP relies on [PyTorch](https://github.com/pytorch/pytorch), so you need to install it first.
Please refer to the official installation [guide](https://github.com/pytorch/pytorch#installation) for details.
It is easily installable via conda, or you can compile it from source to optimize it for your machine.

PTP is not (yet) available as a [pip](https://pip.pypa.io/en/stable/quickstart/) package, or on [conda](https://anaconda.org/pytorch/pytorch).
However, we provide the `setup.py` script and recommend to use it for installation.
First please clone the project repository:

```console
git clone git@github.com:IBM/pytorchpipe.git
cd pytorchpipe/
```

Next, install the dependencies by running:

```console
  python setup.py develop
```

This command will install all dependencies via pip, while still enabling you to change the code of the existing components/workers and running them by calling the associated ``ptp-*`` commands.
More in that subject can be found in the following blog post on [dev_mode](https://setuptools.readthedocs.io/en/latest/setuptools.html#development-mode).


## Quick start: MNIST image classification with a simple ConvNet model

Please consider a simple ConvNet model consisting of two parts: 
  * few convolutional layers accepting the MNIST images and returning feature maps being, in general, a 4D tensor (first dimension being the batch size, a rule of thumb in PTP),
  * one (or more) dense layers that accept the (flattened) feature maps and return predictions in the form of logarithm of probability distributions (LogSoftmax as last non-linearity).

### Training the model

Assume that we will use ```NLL Loss``` function, and, besides, want to monitor the ```Accuracy``` statistics.
The resulting pipeline is presented below.
The additional ```Answer Decoder``` component translates the predictions into class names, whereas ```Stream Viewer``` displays content of the indicated data streams for a single sample randomly picked from the batch.


![Alt text](docs/source/img/1_tutorials/data_flow_tutorial_mnist_1_training.png?raw=true "Trainining of a simple ConvNet model on MNIST dataset")

__Note__: The associated ```mnist_classification_convnet_softmax.yml``` configuration file can be found in ```configs/tutorials``` folder.

We will train the model with _ptp-offline-trainer_, a general _worker_ script that follows the classical training-validation, epoch-based methodology.
This means, that despite the presence of three sections (associated with training, validation and test splits of the MNIST dataset) the trainer will consider only the content of ``training`` and ```validation``` sections (plus ```pipeline```, containing the definition of the whole pipeline).
Let's run the training by calling the following from the command line:

```console
ptp-offline-trainer --c configs/tutorials/mnist_classification_convnet_softmax.yml
```

__Note__: Please call ```ptp-offline-trainer --h``` to learn more about the run-time arguments. In order to understand the structure of the main configuration file please look at the default configuration file of the trainer located in ```configs/default/workers``` folder.

The trainer will log on the console training and validation statistics, along with additional information logged by the components, e.g. contents of the streams:

```console
[2019-07-05 13:31:44] - INFO - OfflineTrainer >>> episode 006000; epoch 06; loss 0.1968410313; accuracy 0.9219
[2019-07-05 13:31:45] - INFO - OfflineTrainer >>> End of epoch: 6
================================================================================
[2019-07-05 13:31:45] - INFO - OfflineTrainer >>> episode 006019; episodes_aggregated 000860; epoch 06; loss 0.1799264401; loss_min 0.0302138925; loss_max 0.5467863679; loss_std 0.0761705562; accuracy 0.94593; accuracy_std 0.02871 [Full Training]
[2019-07-05 13:31:45] - INFO - OfflineTrainer >>> Validating over the entire validation set (5000 samples in 79 episodes)
[2019-07-05 13:31:45] - INFO - stream_viewer >>> Showing selected streams for sample 20 (index: 55358):
 'labels': One
 'targets': 1
 'predictions': tensor([-1.1452e+01, -1.6804e-03, -1.1357e+01, -1.1923e+01, -6.6160e+00,
        -1.4658e+01, -9.6191e+00, -8.6472e+00, -9.6082e+00, -1.3505e+01])
 'predicted_answers': One
```

Please note that whenever the validation loss goes down, the trainer automatically will save the pipeline to the checkpoint file:

```console
[2019-07-05 13:31:47] - INFO - OfflineTrainer >>> episode 006019; episodes_aggregated 000079; epoch 06; loss 0.1563445479; loss_min 0.0299939774; loss_max 0.5055227876; loss_std 0.0854654983; accuracy 0.95740; accuracy_std 0.02495 [Full Validation]
[2019-07-05 13:31:47] - INFO - mnist_classification_convnet_softmax >>> Exporting pipeline 'mnist_classification_convnet_softmax' parameters to checkpoint:
 /users/tomaszkornuta/experiments/mnist/mnist_classification_convnet_softmax/20190705_132624/checkpoints/mnist_classification_convnet_softmax_best.pt
  + Model 'image_encoder' [ConvNetEncoder] params saved
  + Model 'classifier' [FeedForwardNetwork] params saved
```

After the training finishes, the trainer will inform about the termination reason and indicate where the experiment files (model checkpoint, log files, statistics etc.) can be found:

```console
[2019-07-05 13:32:33] - INFO - mnist_classification_convnet_softmax >>> Updated training status in checkpoint:
 /users/tomaszkornuta/experiments/mnist/mnist_classification_convnet_softmax/20190705_132624/checkpoints/mnist_classification_convnet_softmax_best.pt
[2019-07-05 13:32:33] - INFO - OfflineTrainer >>>
================================================================================
[2019-07-05 13:32:33] - INFO - OfflineTrainer >>> Training finished because Converged (Full Validation Loss went below Loss Stop threshold of 0.15)
[2019-07-05 13:32:33] - INFO - OfflineTrainer >>> Experiment finished!
[2019-07-05 13:32:33] - INFO - OfflineTrainer >>> Experiment logged to: /users/tomaszkornuta/experiments/mnist/mnist_classification_convnet_softmax/20190705_132624/
```


### Testing the model

In order to test the model generalization we will use _ptp-processor_, yet another general _worker_ script that performs a single pass over the indicated set.


![Alt text](docs/source/img/1_tutorials/data_flow_tutorial_mnist_2_test.png?raw=true "Test of the pretrained model on test split of the MNIST dataset ")


```console
ptp-processor --load /users/tomaszkornuta/experiments/mnist/mnist_classification_convnet_softmax/20190705_132624/checkpoints/mnist_classification_convnet_softmax_best.pt
```

__Note__: _ptp-processor_ uses the content of _test_ section as default, but it can be changed at run-time. Please call ```ptp-processor --h``` to learn about the available run-time arguments.


```console
[2019-07-05 13:34:41] - INFO - Processor >>> episode 000313; episodes_aggregated 000157; loss 0.1464060694; loss_min 0.0352710858; loss_max 0.3801054060; loss_std 0.0669835582; accuracy 0.95770; accuracy_std 0.02471 [Full Set]
[2019-07-05 13:34:41] - INFO - Processor >>> Experiment logged to: /users/tomaszkornuta/experiments/mnist/mnist_classification_convnet_softmax/20190705_132624/test_20190705_133436/
```

__Note__: Please analyze the ```mnist_classification_convnet_softmax.yml``` configuration file (located in ```configs/tutorials``` directory). Keep in mind that:
  * all components come with default configuration files, located in ```configs/default/components``` folders,
  * all workers come with default configuration files, located in ```configs/default/workers``` folders.
## Documentation

Currently PTP does not have an on-line documentation.
However, there are high-quality comments in all source/configuration files, that will be used for automatic generation of documentation (Sphinx + ReadTheDocs).
Besides, we have shared a [tutorial presentation](https://zenodo.org/record/3269928) explaining motivations and core concepts as well as providing hints how to use the tool and develop your own solutions.


## Contributions

PTP is open for external contributions.
We follow the [Git Branching Model](https://nvie.com/posts/a-successful-git-branching-model/), in short:
  * ```develop``` branch is the main branch, ```master``` branch is used for releases only
  * all changes are integrated by merging pull requests from feat/fix/other branches
  * PTP is integrated with several DevOps monitoring the quality of code/pull requests
  * we strongly encourage unit testing and Test-Driven Development
  * we use projects and kanban to monitor issues/progress/etc.


## Maintainers

A project of the Machine Intelligence team, IBM Research AI, Almaden Research Center.

* Tomasz Kornuta (tkornut@us.ibm.com)

[![HitCount](http://hits.dwyl.io/tkornut/tkornut/pytorchpipe.svg)](http://hits.dwyl.io/tkornut/tkornut/pytorchpipe)


================================================
FILE: configs/cifar100/cifar100_classification_convnet_softmax.yml
================================================
# Load config defining CIFAR100 tasks for training, validation and testing.
default_configs: cifar100/default_cifar100.yml

# Definition of the pipeline.
pipeline:

  # Model consisting of two components.
  image_encoder:
    priority: 1.1
    type: ConvNetEncoder

  # Reshape inputs
  reshaper:
    priority: 1.2
    type: ReshapeTensor
    input_dims: [-1, 16, 2, 2]
    output_dims: [-1, 64]
    streams:
      inputs: feature_maps
      outputs: reshaped_maps
    globals:
      output_size: reshaped_maps_size

  # Image classifier.
  classifier:
    priority: 1.3
    type: FeedForwardNetwork 
    streams:
      inputs: reshaped_maps
    globals:
      input_size: reshaped_maps_size
      prediction_size: num_fine_classes

#: pipeline


================================================
FILE: configs/cifar100/default_cifar100.yml
================================================
# Training parameters:
training:
  task: 
    type: CIFAR100
    batch_size: &b 1024
    use_train_data: True
  # Use sampler that operates on a subset.
  dataloader:
    num_workers: 10
  #  shuffle: False
  sampler:
    type: SubsetRandomSampler
    indices: [0, 45000]
  # optimizer parameters:
  optimizer:
    type: Adam
    lr: 0.001
  # settings parameters
  terminal_conditions:
    loss_stop_threshold: 0.05
    early_stop_validations: -1
    episode_limit: 10000
    epoch_limit: 10

# Validation parameters:
validation:
  #partial_validation_interval: 100
  task:
    type: CIFAR100
    batch_size: *b
    use_train_data: True  # True because we are splitting the training set to: validation and training
    #resize: [32, 32]
  # Use sampler that operates on a subset.
  sampler:
    type: SubsetRandomSampler
    indices: [45000, 50000]

# Testing parameters:
test:
  task:
    type: CIFAR100
    batch_size: *b
    use_train_data: False
    #resize: [32, 32]

pipeline:
  disable: image_viewer

  # Loss
  nllloss:
    type: NLLLoss
    priority: 10.0
    streams:
      targets: fine_targets

  # Statistics.
  batch_size:
    priority: 100.0
    type: BatchSizeStatistics
    streams:
      targets: fine_targets

  accuracy:
    priority: 100.1
    type: AccuracyStatistics
    streams:
      targets: fine_targets


  precision_recall:
    priority: 100.2
    type: PrecisionRecallStatistics
    use_word_mappings: True
    #show_class_scores: True
    globals:
      word_mappings: fine_label_word_mappings
    streams:
      targets: fine_targets

  answer_decoder:
    priority: 100.3
    type: WordDecoder
    import_word_mappings_from_globals: True
    globals:
      word_mappings: fine_label_word_mappings
    streams:
      inputs: predictions
      outputs: answers

  stream_viewer:
    priority: 100.4
    type: StreamViewer
    input_streams: coarse_targets, coarse_labels, fine_targets, fine_labels, answers

  image_viewer:
    priority: 100.5
    type: ImageViewer
    streams:
      images: inputs
      labels: fine_labels
      answers: coarse_labels  



================================================
FILE: configs/clevr/clevr_all_vgg_glove_lstm_concat_ffn.yml
================================================
# Load config defining CLEVR tasks for training, validation and testing.
default_configs: clevr/default_clevr.yml

# Resize and normalize images - in all sets.
training:
  task: 
    resize_image: [224, 224]
    image_preprocessing: normalize

validation:
  task: 
    resize_image: [224, 224]
    image_preprocessing: normalize

test:
  task: 
    resize_image: [224, 224]
    image_preprocessing: normalize

# Definition of the pipeline.
pipeline:

  global_publisher:
    priority: 0
    type: GlobalVariablePublisher
    keys: [question_encoder_output_size, image_encoder_output_size]
    values: [100, 100]

  ##################################################################
  # 1st pipeline: question.
  # Questions encoding.
  question_tokenizer:
    priority: 1.1
    type: SentenceTokenizer
    # Lowercase all letters + remove punctuation (reduced vocabulary of 80 words instead of 87)
    preprocessing: all
    streams: 
      inputs: questions
      outputs: tokenized_questions

  # Model 1: Embeddings
  question_embeddings:
    priority: 1.2
    type: SentenceEmbeddings
    embeddings_size: 50
    pretrained_embeddings_file: glove.6B.50d.txt
    data_folder: ~/data/CLEVR_v1.0
    word_mappings_file: questions.all.word.mappings.lowercase.csv
    export_word_mappings_to_globals: True
    globals:
      word_mappings: question_word_mappings
      vocabulary_size: num_question_words
    streams:
      inputs: tokenized_questions
      outputs: embedded_questions      
  
  # Model 2: RNN
  lstm:
    priority: 1.3
    type: RecurrentNeuralNetwork
    cell_type: LSTM
    prediction_mode: Last
    initial_state: Zero
    hidden_size: 50
    # Turn off the (log-)softmax.
    use_logsoftmax: False
    streams:
      inputs: embedded_questions
      predictions: question_activations
    globals:
      input_size: embeddings_size
      prediction_size: question_encoder_output_size

  ##################################################################
  # 2nd subpipeline: image.
  # Image encoder.
  image_encoder:
    priority: 2.1
    type: GenericImageEncoder
    model_type: vgg16
    streams:
      inputs: images
      outputs: image_activations
    globals:
      output_size: image_encoder_output_size

  ##################################################################
  # 3rd subpipeline: concatenation + FF.
  concat:
    type: ConcatenateTensor
    priority: 3.1
    input_streams: [question_activations,image_activations]
    dim: 1 # default
    input_dims: [[-1,100],[-1,100]]
    output_dims: [-1,200]
    streams:
      outputs: concatenated_activations
    globals:
      output_size: concatenated_size

  classifier:
    type: FeedForwardNetwork 
    hidden_sizes: [100]
    priority: 3.2
    streams:
      inputs: concatenated_activations
    globals:
      input_size: concatenated_size
      prediction_size: num_answers

#: pipeline


================================================
FILE: configs/clevr/clevr_image_convnet_ffn.yml
================================================
# Load config defining CLEVR tasks for training, validation and testing.
default_configs: clevr/default_clevr.yml

# Definition of the pipeline.
pipeline:

  # Model consisting of two components.
  image_encoder:
    priority: 1.1
    type: ConvNetEncoder
    streams:
      inputs: images

  # Reshape inputs
  reshaper:
    priority: 1.2
    type: ReshapeTensor
    input_dims: [-1, 16, 58, 38]
    output_dims: [-1, 35264]
    streams:
      inputs: feature_maps
      outputs: reshaped_maps
    globals:
      output_size: reshaped_maps_size

  # Image classifier.
  classifier:
    priority: 1.3
    type: FeedForwardNetwork 
    hidden_sizes: [1000]
    streams:
      inputs: reshaped_maps
    globals:
      input_size: reshaped_maps_size
      prediction_size: num_answers

#: pipeline


================================================
FILE: configs/clevr/clevr_question_glove_lstm.yml
================================================
# Load config defining CLEVR tasks for training, validation and testing.
default_configs: clevr/default_clevr.yml

# This is a unimodal (question-based) baseline, thus stop streaming images - in all sets.
training:
  task: 
    stream_images: False

validation:
  task: 
    stream_images: False

test:
  task: 
    stream_images: False

# Definition of the pipeline.
pipeline:

  # Questions encoding.
  question_tokenizer:
    priority: 1.1
    type: SentenceTokenizer
    # Lowercase all letters + remove punctuation (reduced vocabulary of 80 words instead of 87)
    preprocessing: all
    streams: 
      inputs: questions
      outputs: tokenized_questions

  # Model 1: Embeddings
  question_embeddings:
    priority: 1.2
    type: SentenceEmbeddings
    embeddings_size: 50
    pretrained_embeddings_file: glove.6B.50d.txt
    data_folder: ~/data/CLEVR_v1.0
    word_mappings_file: questions.all.word.mappings.lowercase.csv
    export_word_mappings_to_globals: True
    globals:
      word_mappings: question_word_mappings
      vocabulary_size: num_question_words
    streams:
      inputs: tokenized_questions
      outputs: embedded_questions      
  
  # Model 2: RNN
  lstm:
    priority: 1.3
    type: RecurrentNeuralNetwork
    cell_type: LSTM
    prediction_mode: Last
    initial_state: Zero
    hidden_size: 50
    streams:
      inputs: embedded_questions
    globals:
      input_size: embeddings_size
      prediction_size: num_answers


#: pipeline


================================================
FILE: configs/clevr/default_clevr.yml
================================================
# Training parameters:
training:
  task: 
    type: CLEVR
    batch_size: &b 64
    split: training
    #resize_image: [224, 224]
  # optimizer parameters:
  optimizer:
    type: Adam
    lr: 0.0001
  # settings parameters
  terminal_conditions:
    loss_stop_threshold: 0.05
    early_stop_validations: -1
    episode_limit: 10000
    epoch_limit: 10

# Validation parameters:
validation:
  task:
    type: CLEVR
    batch_size: *b
    split: validation
    #resize_image: [224, 224]

# Testing parameters:
test:
  task:
    type: CLEVR
    batch_size: *b
    split: test
    #resize_image: [224, 224]

pipeline:
  name: tmp
  disable: image_viewer

  label_to_target:
    type: LabelIndexer
    priority: 0.1
    # Load word mappings for answers.
    data_folder: ~/data/CLEVR_v1.0
    word_mappings_file: answers.all.word.mappings.csv
    export_word_mappings_to_globals: True
    globals:
      word_mappings: answer_word_mappings
      vocabulary_size: num_answers
    streams:
      inputs: answers 
      outputs: target_answers


  # Loss
  nllloss:
    type: NLLLoss
    priority: 10.1
    streams:
      targets: target_answers

  # Statistics.
  batch_size:
    priority: 100.0
    type: BatchSizeStatistics

  accuracy:
    priority: 100.1
    type: AccuracyStatistics
    streams:
      targets: target_answers

  precision_recall:
    priority: 100.2
    type: PrecisionRecallStatistics
    use_word_mappings: True
    show_class_scores: True
    globals:
      word_mappings: answer_word_mappings
    streams:
      targets: target_answers

  answer_decoder:
    priority: 100.3
    type: WordDecoder
    import_word_mappings_from_globals: True
    globals:
      word_mappings: answer_word_mappings
    streams:
      inputs: predictions
      outputs: predicted_answers

  stream_viewer:
    priority: 100.4
    type: StreamViewer
    input_streams: indices, questions, target_answers, predicted_answers

  #image_viewer:
  #  priority: 100.5
  #  type: ImageViewer
  #  streams:
  #    images: inputs
  #    labels: labels
  #    answers: answers



================================================
FILE: configs/default/components/language/bow_encoder.yml
================================================
# This file defines the default values for BOW Encoder.

####################################################################
# 1. CONFIGURATION PARAMETERS that will be LOADED by the component.
####################################################################

streams: 
  ####################################################################
  # 2. Keymappings associated with INPUT and OUTPUT streams.
  ####################################################################

  # Stream containing input tensor (INPUT)
  inputs: inputs

  # Stream containing output tensor (OUTPUT)
  outputs: outputs

globals:
  ####################################################################
  # 3. Keymappings of variables that will be RETRIEVED from GLOBALS.
  ####################################################################

  # Length of the bag-of-word vector.
  bow_size: bow_size

  ####################################################################
  # 4. Keymappings associated with GLOBAL variables that will be SET.
  ####################################################################

  ####################################################################
  # 5. Keymappings associated with statistics that will be ADDED.
  ####################################################################



================================================
FILE: configs/default/components/language/label_indexer.yml
================================================
# This file defines the default values for LabelIndexer.

####################################################################
# 1. CONFIGURATION PARAMETERS that will be LOADED by the component.
####################################################################

# Folder where task will store data (LOADED)
data_folder: '~/data/'

# Source files that will be used to create the vocabulary  (LOADED)
source_vocabulary_files: ''

# Additional tokens that will be added to vocabulary (LOADED)
additional_tokens: ''

# File containing word (LOADED)
word_mappings_file: 'word_mappings.csv'

# HACK: This key is useless here, but needed by parent class. Should be removed/fixed in the future
export_pad_index_to_globals: False

# If set, component will always (re)generate the vocabulary (LOADED)
regenerate: False 

# Flag informing whether word mappings will be imported from globals (LOADED)
import_word_mappings_from_globals: False

# Flag informing whether word mappings will be exported to globals (LOADED)
export_word_mappings_to_globals: False

# Value that will be used when word is out of vocabulary (LOADED)
# (Mask for that element will be 0 as well)
# -100 is the default value used by PyTorch loss functions to specify
# target values that will be ignored and do not contribute to the input gradient.
# (ignore_index=-100)
out_of_vocabulary_value: -100

streams: 
  ####################################################################
  # 2. Keymappings associated with INPUT and OUTPUT streams.
  ####################################################################

  # Stream containing input tensor (INPUT)
  inputs: inputs

  # Stream containing output tensor (OUTPUT)
  outputs: outputs

globals:
  ####################################################################
  # 3. Keymappings of variables that will be RETRIEVED from GLOBALS.
  ####################################################################

  ####################################################################
  # 4. Keymappings associated with GLOBAL variables that will be SET.
  ####################################################################

  # The loaded/exported word mappings (RETRIEVED/SET)
  # This depends on the import/export configuration flags above.
  word_mappings: word_mappings

  # Size of the vocabulary (RETRIEVED/SET)
  # This depends on the import/export configuration flags above.
  vocabulary_size: vocabulary_size

  ####################################################################
  # 5. Keymappings associated with statistics that will be ADDED.
  ####################################################################



================================================
FILE: configs/default/components/language/sentence_indexer.yml
================================================
# This file defines the default values for Sentence Indexer.

####################################################################
# 1. CONFIGURATION PARAMETERS that will be LOADED by the component.
####################################################################

# Folder where task will store data (LOADED)
data_folder: '~/data/'

# Source files that will be used to create the vocabulary  (LOADED)
source_vocabulary_files: ''

# Additional tokens that will be added to vocabulary (LOADED)
# This list can be extended, but <PAD> and <EOS> are special tokens.
# <PAD> is ALWAYS used for padding shorter sequences.
additional_tokens: '<PAD>,<EOS>'

# Enable <EOS> (end of sequence) token.
eos_token: False

# HACK: This key is useless here, but needed by parent class. Should be removed/fixed in the future
export_pad_index_to_globals: False

# File containing word (LOADED)
word_mappings_file: 'word_mappings.csv'

# If set, component will always (re)generate the vocabulary (LOADED)
regenerate: False 

# Flag informing whether word mappings will be imported from globals (LOADED)
import_word_mappings_from_globals: False

# Flag informing whether word mappings will be exported to globals (LOADED)
export_word_mappings_to_globals: False

# Fixed padding length
# -1  -> For each batch, automatically pad to the length of the longest sequence of the batch
#        (variable from batch to batch)
# > 0 -> Pad each sequence to the chosen length (fixed for all batches)
fixed_padding: -1

# Operation mode. If 'reverse' is True, then it will change indices into words (LOADED)
reverse: False

# Flag indicating whether inputs are represented as distributions or indices (LOADED)
# Options: True (expects distribution for each input item in sequence)
#          False (expects indices (max args))
use_input_distributions: False

streams: 
  ####################################################################
  # 2. Keymappings associated with INPUT and OUTPUT streams.
  ####################################################################

  # Stream containing input tensor (INPUT)
  inputs: inputs

  # Stream containing output tensor (OUTPUT)
  outputs: outputs

globals:
  ####################################################################
  # 3. Keymappings of variables that will be RETRIEVED from GLOBALS.
  ####################################################################

  ####################################################################
  # 4. Keymappings associated with GLOBAL variables that will be SET.
  ####################################################################

  # The loaded/exported word mappings (RETRIEVED/SET)
  # This depends on the import/export configuration flags above.
  word_mappings: word_mappings

  # Size of the vocabulary (RETRIEVED/SET)
  # This depends on the import/export configuration flags above.
  vocabulary_size: vocabulary_size

  # Index of the <PAD> token
  # Will be set only if `export_pad_index_to_globals == True`
  pad_index: pad_index

  ####################################################################
  # 5. Keymappings associated with statistics that will be ADDED.
  ####################################################################



================================================
FILE: configs/default/components/language/sentence_one_hot_encoder.yml
================================================
# This file defines the default values for Sentence 1-hot Encoder.

####################################################################
# 1. CONFIGURATION PARAMETERS that will be LOADED by the component.
####################################################################

# Folder where task will store data (LOADED)
data_folder: '~/data/'

# Source files that will be used to create the vocabulary  (LOADED)
source_vocabulary_files: ''

# Additional tokens that will be added to vocabulary (LOADED)
additional_tokens: ''

# File containing word mappings (LOADED)
word_mappings_file: 'word_mappings.csv'

# HACK: This key is useless here, but needed by parent class. Should be removed/fixed in the future
export_pad_index_to_globals: False

# If set, component will always (re)generate the vocabulary (LOADED)
regenerate: False 

# Flag informing whether word mappings will be imported from globals (LOADED)
import_word_mappings_from_globals: False

# Flag informing whether word mappings will be exported to globals (LOADED)
export_word_mappings_to_globals: False

streams: 
  ####################################################################
  # 2. Keymappings associated with INPUT and OUTPUT streams.
  ####################################################################

  # Stream containing input tensor (INPUT)
  inputs: inputs

  # Stream containing output tensor (OUTPUT)
  outputs: outputs

globals:
  ####################################################################
  # 3. Keymappings of variables that will be RETRIEVED from GLOBALS.
  ####################################################################

  ####################################################################
  # 4. Keymappings associated with GLOBAL variables that will be SET.
  ####################################################################

  # The loaded/exported word mappings (RETRIEVED/SET)
  # This depends on the import/export configuration flags above.
  word_mappings: word_mappings

  # Size of the vocabulary (RETRIEVED/SET)
  # This depends on the import/export configuration flags above.
  vocabulary_size: vocabulary_size

  ####################################################################
  # 5. Keymappings associated with statistics that will be ADDED.
  ####################################################################



================================================
FILE: configs/default/components/language/sentence_tokenizer.yml
================================================
# This file defines the default values for Sentence Tokenizer.

####################################################################
# 1. CONFIGURATION PARAMETERS that will be LOADED by the component.
####################################################################

# Operation mode (LOADED)
# False: sentence -> list of strings, True: list of strings -> sentence.
detokenize: False 

# Select applied preprocessing/augmentations (LOADED)
# Use one (or more) of the transformations:
# none | lowercase | remove_punctuation | all
# Accepted formats: a,b,c or [a,b,c]
preprocessing: none

# List of characters to be removed 
remove_characters: ''

streams: 
  ####################################################################
  # 2. Keymappings associated with INPUT and OUTPUT streams.
  ####################################################################

  # Stream containing input (detokenized or tokenized) sentences (INPUT)
  inputs: inputs

  # Stream containing output (tokenized or detokenized) sentences (OUTPUT)
  outputs: outputs

globals:
  ####################################################################
  # 3. Keymappings of variables that will be RETRIEVED from GLOBALS.
  ####################################################################

  ####################################################################
  # 4. Keymappings associated with GLOBAL variables that will be SET.
  ####################################################################

  ####################################################################
  # 5. Keymappings associated with statistics that will be ADDED.
  ####################################################################



================================================
FILE: configs/default/components/language/word_decoder.yml
================================================
# This file defines the default values for Word Decoder.

####################################################################
# 1. CONFIGURATION PARAMETERS that will be LOADED by the component.
####################################################################

# Folder where task will store data (LOADED)
data_folder: '~/data/'

# Source files that will be used to create the vocabulary  (LOADED)
source_vocabulary_files: ''

# Additional tokens that will be added to vocabulary (LOADED)
additional_tokens: ''

# HACK: This key is useless here, but needed by parent class. Should be removed/fixed in the future
export_pad_index_to_globals: False

# File containing word mappings (LOADED)
word_mappings_file: 'word_mappings.csv'

# If set, component will always (re)generate the vocabulary (LOADED)
regenerate: False 

# Flag informing whether word mappings will be imported from globals (LOADED)
import_word_mappings_from_globals: False

# Flag informing whether word mappings will be exported to globals (LOADED)
export_word_mappings_to_globals: False

streams: 
  ####################################################################
  # 2. Keymappings associated with INPUT and OUTPUT streams.
  ####################################################################

  # Stream containing input tensor (INPUT)
  inputs: inputs

  # Stream containing output words (OUTPUT)
  outputs: outputs

globals: 
  ####################################################################
  # 3. Keymappings of variables that will be RETRIEVED from GLOBALS.
  ####################################################################

  ####################################################################
  # 4. Keymappings associated with GLOBAL variables that will be SET.
  ####################################################################

  # The loaded/exported word mappings (RETRIEVED/SET)
  # This depends on the import/export configuration flags above.
  word_mappings: word_mappings

  # Size of the vocabulary (RETRIEVED/SET)
  # This depends on the import/export configuration flags above.
  vocabulary_size: vocabulary_size

  ####################################################################
  # 5. Keymappings associated with statistics that will be ADDED.
  ####################################################################



================================================
FILE: configs/default/components/losses/nll_loss.yml
================================================
# This file defines the default values for the NLL Loss.

####################################################################
# 1. CONFIGURATION PARAMETERS that will be LOADED by the component.
####################################################################

# Number of dimensions of targets, so the loss can work with 
# different inputs/targets (LOADED)
num_targets_dims: 1

# Loss function (LOADED)
# Options: NLLLoss | CrossEntropyLoss (NOT OPERATIONAL YET!)
# loss_function: NLLLoss

# When set to True, performs masking of selected samples from batch (LOADED)
use_masking: False

streams: 
  ####################################################################
  # 2. Keymappings associated with INPUT and OUTPUT streams.
  ####################################################################

  # Stream containing targets (label ids) (INPUT)
  targets: targets

  # Stream containing batch of predictions (INPUT)
  predictions: predictions

  # Stream containing masks used for masking of selected samples from batch (INPUT)
  masks: masks

  # Stream containing loss (OUTPUT)
  loss: loss

globals:
  ####################################################################
  # 3. Keymappings of variables that will be RETRIEVED from GLOBALS.
  ####################################################################

  # Target value to ignore (masking)
  ignore_index: ignore_index

  ####################################################################
  # 4. Keymappings associated with GLOBAL variables that will be SET.
  ####################################################################

  ####################################################################
  # 5. Keymappings associated with statistics that will be ADDED.
  ####################################################################



================================================
FILE: configs/default/components/masking/join_masked_predictions.yml
================================================
# This file defines the default values for the Join Masked Predictions component.

####################################################################
# 1. CONFIGURATION PARAMETERS that will be LOADED by the component.
####################################################################

# List of input stream names, each containing batch of predictions (LOADED)
input_prediction_streams: ''

# List of input stream names, each containing batch of masks (LOADED)
input_mask_streams: ''

# List of word mapping names - those will be loaded from globals (LOADED)
input_word_mappings: ''

streams: 
  ####################################################################
  # 2. Keymappings associated with INPUT and OUTPUT streams.
  ####################################################################

  # Stream containing batch of output strings (OUTPUT)
  output_strings: output_strings

  # Stream containing batch of output indices (OUTPUT)
  # WARNING: As performed operations are not differentiable,
  # those indices cannot be used for e.g. calculation of loss!!
  output_indices: output_indices

globals:
  ####################################################################
  # 3. Keymappings of variables that will be RETRIEVED from GLOBALS.
  ####################################################################

  # Vocabulary used to produce output strings (RETRIEVED)
  output_word_mappings: output_word_mappings

  ####################################################################
  # 4. Keymappings associated with GLOBAL variables that will be SET.
  ####################################################################

  ####################################################################
  # 5. Keymappings associated with statistics that will be ADDED.
  ####################################################################



================================================
FILE: configs/default/components/masking/string_to_mask.yml
================================================
# This file defines the default values for the String To Mask component.

####################################################################
# 1. CONFIGURATION PARAMETERS that will be LOADED by the component.
####################################################################

# Value that will be used when word is out of vocabulary (LOADED)
# (Mask for that element will be 0 as well)
# -100 is the default value used by PyTorch loss functions to specify
# target values that will be ignored and do not contribute to the input gradient.
# (ignore_index=-100)
out_of_vocabulary_value: -100

streams: 
  ####################################################################
  # 2. Keymappings associated with INPUT and OUTPUT streams.
  ####################################################################

  # Stream containing input strings (INPUT)
  strings: strings

  # Stream containing output masks (OUTPUT)
  masks: masks

  # Stream containing output indices (OUTPUT)
  string_indices: string_indices

globals:
  ####################################################################
  # 3. Keymappings of variables that will be RETRIEVED from GLOBALS.
  ####################################################################

  # Vocabulary used to produce masks and indices (RETRIEVED)
  word_mappings: word_mappings

  ####################################################################
  # 4. Keymappings associated with GLOBAL variables that will be SET.
  ####################################################################

  ####################################################################
  # 5. Keymappings associated with statistics that will be ADDED.
  ####################################################################



================================================
FILE: configs/default/components/models/general_usage/attention_decoder.yml
================================================
# This file defines the default values for the RNN model.

####################################################################
# 1. CONFIGURATION PARAMETERS that will be LOADED by the component.
####################################################################

# Size of the hidden state (LOADED)
hidden_size: 100

# Whether to include the last hidden state in the outputs
output_last_state: False

# Type of recurrent cell (LOADED)
# -> Only GRU is supported

# Number of "stacked" layers (LOADED)
# -> Only a single layer is supported

# Dropout rate (LOADED)
# Default: 0 (means that it is turned off)
dropout_rate: 0

# Prediction mode (LOADED)
# Options: 
#   * Dense (passes every activation through output layer) |
#   * Last (passes only the last activation through the output layer) |
#   * None (all outputs are discarded)
prediction_mode: Dense

# Enable FFN layer at the output of the RNN (before eventual feed back in the case of autoregression).
# Useful if the raw outputs of the RNN are needed, for attention encoder-decoder for example.
ffn_output: True

# Length of generated output sequence (LOADED)
# User must set it per task, as it is task specific.
autoregression_length: 10

# If true, output of the last layer will be additionally processed with Log Softmax (LOADED)
use_logsoftmax: True

streams: 
  ####################################################################
  # 2. Keymappings associated with INPUT and OUTPUT streams.
  ####################################################################

  # Stream containing batch of encoder outputs (INPUT)
  inputs: inputs

  # Stream containing the initial state of the RNN (INPUT)
  # The stream will be actually created only if `initial_state: Input`
  input_state: input_state

  # Stream containing predictions (OUTPUT)
  predictions: predictions

  # Stream containing the final output state of the RNN (output)
  # The stream will be actually created only if `output_last_state: True`
  output_state: output_state

globals:
  ####################################################################
  # 3. Keymappings of variables that will be RETRIEVED from GLOBALS.
  ####################################################################

  # Size of the input (RETRIEVED)
  input_size: input_size

  # Size of the prediction (RETRIEVED)
  prediction_size: prediction_size

  ####################################################################
  # 4. Keymappings associated with GLOBAL variables that will be SET.
  ####################################################################

  ####################################################################
  # 5. Keymappings associated with statistics that will be ADDED.
  ####################################################################



================================================
FILE: configs/default/components/models/general_usage/feed_forward_network.yml
================================================
# This file defines the default values for the Multi-Layer Feed-Forward Network.

####################################################################
# 1. CONFIGURATION PARAMETERS that will be LOADED by the component.
####################################################################

# Optional (LOADED)
# Number of hidden layers, along with their sizes (numbers of neurons).
# hidden_sizes: [dim hidden 1, dim hidden 2, ...]

# Dropout rate (LOADED)
# Default: 0 (means that it is turned off)
dropout_rate: 0

# If true, output of the last layer will be additionally processed with Log Softmax (LOADED)
use_logsoftmax: True

# Number of dimensions, where:
#   - 2 means [Batch size, Input size]
#   - n means [Batch size, dim 1, ..., dim n-2, Input size]
# And the FFN is broadcasted over the last (Input Size) Dimension.
# Also, all the dimensions sizes but the last are conserved, as the FFN is applied over the last dimension.
dimensions: 2

streams: 
  ####################################################################
  # 2. Keymappings associated with INPUT and OUTPUT streams.
  ####################################################################

  # Stream containing batch of inputs (INPUT)
  inputs: inputs

  # Stream containing predictions (OUTPUT)
  predictions: predictions

globals:
  ####################################################################
  # 3. Keymappings of variables that will be RETRIEVED from GLOBALS.
  ####################################################################

  # Size of the input (RETRIEVED)
  input_size: input_size

  # Size of the prediction (RETRIEVED)
  prediction_size: prediction_size

  ####################################################################
  # 4. Keymappings associated with GLOBAL variables that will be SET.
  ####################################################################

  ####################################################################
  # 5. Keymappings associated with statistics that will be ADDED.
  ####################################################################



================================================
FILE: configs/default/components/models/general_usage/recurrent_neural_network.yml
================================================
# This file defines the default values for the RNN model.

####################################################################
# 1. CONFIGURATION PARAMETERS that will be LOADED by the component.
####################################################################

# Size of the hidden state (LOADED)
hidden_size: 100

# Flag informing the model to learn the initial state (h0/c0) (LOADED)
# When false, (h0/c0) will be initialized as zeros.

# Input mode (LOADED)
# Options:
#   * Dense (every iteration expects an input)
#   * Autoregression_First (Autoregression, expects an input for the first iteration)
#   * Autoregression_None (Autoregression, first input will be a null vector)
input_mode: Dense

# Prediction mode (LOADED)
# Options:
#   * Dense (passes every activation through output layer) |
#   * Last (passes only the last activation through output layer) |
#   * None (all outputs are discarded)
prediction_mode: Dense

# Maximal length of generated output sequence when working in auto-regression mode (LOADED)
# User must set it per task, as it is task specific.
# max_autoregression_length: x

# Initial state type (LOADED)
#   * Zero (Vector of zeros, not trainable)
#   * Trainable (xavier initialization, trainable)
#   * Input (the initial hidden state comes from an input stream)
initial_state: Trainable

# Type of recurrent cell (LOADED)
# Options: LSTM | GRU | RNN_TANH | RNN_RELU
cell_type: LSTM

# Number of "stacked" layers (LOADED)
num_layers: 1

# Dropout rate (LOADED)
# Default: 0 (means that it is turned off)
dropout_rate: 0

# Enable FFN layer at the output of the RNN (LOADED)
# Useful if the raw outputs of the RNN are needed, for attention encoder-decoder for example.
use_output_layer: True

# Whether to include the last hidden state in the outputs
output_last_state: False

# If true, output of the last layer will be additionally processed with Log Softmax (LOADED)
use_logsoftmax: True

streams:
  ####################################################################
  # 2. Keymappings associated with INPUT and OUTPUT streams.
  ####################################################################

  # Stream containing batch of inputs (INPUT)
  inputs: inputs

  # Stream containing the initial state of the RNN (INPUT)
  # The stream will be actually created only if `initial_state: Input`
  input_state: input_state

  # Stream containing predictions (OUTPUT)
  predictions: predictions

  # Stream containing the final output state of the RNN (output)
  # The stream will be actually created only if `output_last_state: True`
  output_state: output_state

globals:
  ####################################################################
  # 3. Keymappings of variables that will be RETRIEVED from GLOBALS.
  ####################################################################

  # Size of the input (RETRIEVED)
  input_size: input_size

  # Size of the prediction (RETRIEVED)
  prediction_size: prediction_size

  ####################################################################
  # 4. Keymappings associated with GLOBAL variables that will be SET.
  ####################################################################

  ####################################################################
  # 5. Keymappings associated with statistics that will be ADDED.
  ####################################################################


================================================
FILE: configs/default/components/models/general_usage/seq2seq.yml
================================================
# This file defines the default values for the RNN model.

####################################################################
# 1. CONFIGURATION PARAMETERS that will be LOADED by the component.
####################################################################

# Size of the hidden state (LOADED)
hidden_size: 100

# Flag informing the model to learn the initial state (h0/c0) (LOADED)
# When false, (h0/c0) will be initialized as zeros.

# Initial state type:
#   * Zero (null vector)
#   * Trainable (xavier initialization, trainable)
#   * Input (the initial hidden state comes from an input stream)
initial_state: Trainable

# Whether to include the last hidden state in the outputs
output_last_state: False

# Type of recurrent cell (LOADED)
# Options: LSTM | GRU | RNN_TANH | RNN_RELU
cell_type: LSTM

# Number of "stacked" layers (LOADED)
num_layers: 1

# Dropout rate (LOADED)
# Default: 0 (means that it is turned off)
dropout_rate: 0

# Prediction mode (LOADED)
# Options: 
#   * Dense (passes every activation through output layer) |
#   * Last (passes only the last activation through the output layer) |
#   * None (all outputs are discarded)
prediction_mode: Dense

# Input mode
# Options:
#   * Dense (every iteration expects an input)
#   * Autoregression_First (Autoregression, expects an input for the first iteration)
#   * Autoregression_None (Autoregression, first input will be a null vector)
input_mode: Dense

autoregression_length: 50

# If true, output of the last layer will be additionally processed with Log Softmax (LOADED)
use_logsoftmax: True

streams: 
  ####################################################################
  # 2. Keymappings associated with INPUT and OUTPUT streams.
  ####################################################################

  # Stream containing batch of inputs (INPUT)
  inputs: inputs

  # Stream containing predictions (OUTPUT)
  predictions: predictions

globals:
  ####################################################################
  # 3. Keymappings of variables that will be RETRIEVED from GLOBALS.
  ####################################################################

  # Size of the input (RETRIEVED)
  input_size: input_size

  # Size of the prediction (RETRIEVED)
  prediction_size: prediction_size

  ####################################################################
  # 4. Keymappings associated with GLOBAL variables that will be SET.
  ####################################################################

  ####################################################################
  # 5. Keymappings associated with statistics that will be ADDED.
  ####################################################################



================================================
FILE: configs/default/components/models/language/index_embeddings.yml
================================================
# This file defines the default values for the Index Embeddings.

####################################################################
# 1. CONFIGURATION PARAMETERS that will be LOADED by the component.
####################################################################

# Size of the embeddings (LOADED)
# Needs to be set by the user.
# embeddings_size: 100

streams: 
  ####################################################################
  # 2. Keymappings associated with INPUT and OUTPUT streams.
  ####################################################################

  # Stream containing batch of inputs (INPUT)
  inputs: inputs

  # Stream containing predictions (OUTPUT)
  predictions: predictions

globals:
  ####################################################################
  # 3. Keymappings of variables that will be RETRIEVED from GLOBALS.
  ####################################################################

  # Size of the vocabulary (RETRIEVED)
  vocab_size: vocab_size

  ####################################################################
  # 4. Keymappings associated with GLOBAL variables that will be SET.
  ####################################################################

  # Size of the embeddings (SET)
  # It is exported to globals, so other components can use it during
  # their initialization.
  embeddings_size: embeddings_size

  ####################################################################
  # 5. Keymappings associated with statistics that will be ADDED.
  ####################################################################



================================================
FILE: configs/default/components/models/language/sentence_embeddings.yml
================================================
# This file defines the default values for the Sentence Embeddings.

####################################################################
# 1. CONFIGURATION PARAMETERS that will be LOADED by the component.
####################################################################

# Folder where task will store data (LOADED)
data_folder: '~/data/'

# Source files that will be used to create the vocabulary  (LOADED)
source_vocabulary_files: ''

# Additional tokens that will be added to vocabulary (LOADED)
# This list can be extended, but <PAD> and <EOS> are special tokens.
# <PAD> is ALWAYS used for padding shorter sequences.
additional_tokens: '<PAD>'

# Enable <EOS> (end of sequence) token.
eos_token: False

export_pad_index_to_globals: False

# File containing word mappings (LOADED)
word_mappings_file: 'word_mappings.csv'

# If set, component will always (re)generate the vocabulary (LOADED)
regenerate: False 

# Flag informing whether word mappings will be imported from globals (LOADED)
import_word_mappings_from_globals: False

# Flag informing whether word mappings will be exported to globals (LOADED)
export_word_mappings_to_globals: False

# Fixed padding length
# -1  -> For each batch, automatically pad to the length of the longest sequence of the batch
#        (variable from batch to batch)
# > 0 -> Pad each sequence to the chosen length (fixed for all batches)
fixed_padding: -1

# File containing pretrained embeddings (LOADED)
# Empty means that no embeddings will be loaded.
# Options: 
# '' | glove.6B.50d.txt | glove.6B.100d.txt | glove.6B.200d.txt | glove.6B.300d.txt |
# glove.42B.300d.txt | glove.840B.300d.txt | glove.twitter.27B.txt | mimic.fastText.no_clean.300d.pickled
pretrained_embeddings_file: ''

streams: 
  ####################################################################
  # 2. Keymappings associated with INPUT and OUTPUT streams.
  ####################################################################

  # Stream containing batch of inputs (INPUT)
  inputs: inputs

  # Stream containing predictions (OUTPUT)
  predictions: predictions

globals:
  ####################################################################
  # 3. Keymappings of variables that will be RETRIEVED from GLOBALS.
  ####################################################################

  ####################################################################
  # 4. Keymappings associated with GLOBAL variables that will be SET.
  ####################################################################

  # The loaded/exported word mappings (RETRIEVED/SET)
  # This depends on the import/export configuration flags above.
  word_mappings: word_mappings

  # Size of the vocabulary (RETRIEVED/SET)
  # This depends on the import/export configuration flags above.
  vocabulary_size: vocabulary_size

  # Size of the embeddings (SET)
  # It is exported to globals, so other components can use it during
  # their initialization.
  embeddings_size: embeddings_size

  # Index of the <PAD> token
  # Will be set only if `export_pad_index_to_globals == True`
  pad_index: pad_index

  ####################################################################
  # 5. Keymappings associated with statistics that will be ADDED.
  ####################################################################



================================================
FILE: configs/default/components/models/multi_modal_reasoning/compact_bilinear_pooling.yml
================================================
# This file defines the default values for the Multimodal Compact Bilinear Pooling model.

####################################################################
# 1. CONFIGURATION PARAMETERS that will be LOADED by the component.
####################################################################

# Parameter denoting whether projection matrices are trainable (LOADED)
# Setting flag that to true will result in trainable, dense (i.e. not "sketch") projection layers.
trainable_projections: False

streams: 
  ####################################################################
  # 2. Keymappings associated with INPUT and OUTPUT streams.
  ####################################################################

  # Stream containing batch of encoded images (INPUT)
  image_encodings: image_encodings

  # Stream containing batch of encoded questions (INPUT)
  question_encodings: question_encodings

  # Stream containing outputs (OUTPUT)
  outputs: outputs

globals:
  ####################################################################
  # 3. Keymappings of variables that will be RETRIEVED from GLOBALS.
  ####################################################################

  # Size of the image encodings input (RETRIEVED)
  image_encoding_size: image_encoding_size

  # Size of the question encodings input (RETRIEVED)
  question_encoding_size: question_encoding_size

  # Size of the output (RETRIEVED)
  output_size: output_size

  ####################################################################
  # 4. Keymappings associated with GLOBAL variables that will be SET.
  ####################################################################

  ####################################################################
  # 5. Keymappings associated with statistics that will be ADDED.
  ####################################################################



================================================
FILE: configs/default/components/models/multi_modal_reasoning/factorized_bilinear_pooling.yml
================================================
# This file defines the default values for the FactorizedBilinearPooling model.

####################################################################
# 1. CONFIGURATION PARAMETERS that will be LOADED by the component.
####################################################################

# Dropout rate (LOADED)
# Default: 0 (means that it is turned off)
dropout_rate: 0

# Size of the latent space (LOADED)
latent_size: 100

# Factor used for sum pooling (LOADED)
pool_factor: 2


streams:
  ####################################################################
  # 2. Keymappings associated with INPUT and OUTPUT streams.
  ####################################################################

  # Stream containing batch of encoded images (INPUT)
  image_encodings: image_encodings

  # Stream containing batch of encoded questions (INPUT)
  question_encodings: question_encodings

  # Stream containing outputs (OUTPUT)
  outputs: outputs

globals:
  ####################################################################
  # 3. Keymappings of variables that will be RETRIEVED from GLOBALS.
  ####################################################################

  # Size of the image encodings input (RETRIEVED)
  image_encoding_size: image_encoding_size

  # Size of the question encodings input (RETRIEVED)
  question_encoding_size: question_encoding_size

  ####################################################################
  # 4. Keymappings associated with GLOBAL variables that will be SET.
  ####################################################################

  # Size of the output (SET)
  output_size: output_size

  ####################################################################
  # 5. Keymappings associated with statistics that will be ADDED.
  ####################################################################


================================================
FILE: configs/default/components/models/multi_modal_reasoning/low_rank_bilinear_pooling.yml
================================================
# This file defines the default values for the LowRankBilinearPooling model.

####################################################################
# 1. CONFIGURATION PARAMETERS that will be LOADED by the component.
####################################################################

# Dropout rate (LOADED)
# Default: 0 (means that it is turned off)
dropout_rate: 0

streams: 
  ####################################################################
  # 2. Keymappings associated with INPUT and OUTPUT streams.
  ####################################################################

  # Stream containing batch of encoded images (INPUT)
  image_encodings: image_encodings

  # Stream containing batch of encoded questions (INPUT)
  question_encodings: question_encodings

  # Stream containing outputs (OUTPUT)
  outputs: outputs

globals:
  ####################################################################
  # 3. Keymappings of variables that will be RETRIEVED from GLOBALS.
  ####################################################################

  # Size of the image encodings input (RETRIEVED)
  image_encoding_size: image_encoding_size

  # Size of the question encodings input (RETRIEVED)
  question_encoding_size: question_encoding_size

  # Size of the output (RETRIEVED)
  output_size: output_size

  ####################################################################
  # 4. Keymappings associated with GLOBAL variables that will be SET.
  ####################################################################

  ####################################################################
  # 5. Keymappings associated with statistics that will be ADDED.
  ####################################################################



================================================
FILE: configs/default/components/models/multi_modal_reasoning/question_driven_attention.yml
================================================
# This file defines the default values for the QuestionDrivenAttention model.

####################################################################
# 1. CONFIGURATION PARAMETERS that will be LOADED by the component.
####################################################################

# Dropout rate (LOADED)
# Default: 0 (means that it is turned off)
dropout_rate: 0

# Size of the latent space (LOADED)
latent_size: 100

# Number of attention heads (LOADED)
num_attention_heads: 2

# Type of output returned
# Options: Image | Fusion
# Details: attention-weighted image |  concatenation of attention-weighted image and RNN encoded question
output_mode: Fusion


streams:
  ####################################################################
  # 2. Keymappings associated with INPUT and OUTPUT streams.
  ####################################################################

  # Stream containing batch of encoded images (INPUT)
  feature_maps: feature_maps

  # Stream containing batch of encoded questions (INPUT)
  question_encodings: question_encodings

  # Stream containing outputs (OUTPUT)
  outputs: outputs

globals:
  ####################################################################
  # 3. Keymappings of variables that will be RETRIEVED from GLOBALS.
  ####################################################################

  # Height of the features tensor (RETRIEVED)
  feature_maps_height: feature_maps_height

  # Width of the features tensor (RETRIEVED)
  feature_maps_width: feature_maps_width

  # Depth of the features tensor (RETRIEVED)
  feature_maps_depth: feature_maps_depth

  # Size of the question encodings input (RETRIEVED)
  question_encoding_size: question_encoding_size

  ####################################################################
  # 4. Keymappings associated with GLOBAL variables that will be SET.
  ####################################################################

  # Size of the output (SET)
  output_size: output_size

  ####################################################################
  # 5. Keymappings associated with statistics that will be ADDED.
  ####################################################################


================================================
FILE: configs/default/components/models/multi_modal_reasoning/relational_network.yml
================================================
# This file defines the default values for the RelationalNetwork model.

####################################################################
# 1. CONFIGURATION PARAMETERS that will be LOADED by the component.
####################################################################

# Dropout rate (LOADED)
# Default: 0 (means that it is turned off)
dropout_rate: 0

# Number of layers along with their sizes (numbers of neurons) of g_theta network (LOADED)
g_theta_sizes: [256, 256, 256]

streams: 
  ####################################################################
  # 2. Keymappings associated with INPUT and OUTPUT streams.
  ####################################################################

  # Stream containing batch of encoded images (INPUT)
  feature_maps: feature_maps

  # Stream containing batch of encoded questions (INPUT)
  question_encodings: question_encodings

  # Stream containing outputs (OUTPUT)
  outputs: outputs

globals:
  ####################################################################
  # 3. Keymappings of variables that will be RETRIEVED from GLOBALS.
  ####################################################################

  # Height of the features tensor (RETRIEVED)
  feature_maps_height: feature_maps_height

  # Width of the features tensor (RETRIEVED)
  feature_maps_width: feature_maps_width

  # Depth of the features tensor (RETRIEVED)
  feature_maps_depth: feature_maps_depth

  # Size of the question encodings input (RETRIEVED)
  question_encoding_size: question_encoding_size

  ####################################################################
  # 4. Keymappings associated with GLOBAL variables that will be SET.
  ####################################################################

  # Size of the output (SET)
  output_size: output_size

  ####################################################################
  # 5. Keymappings associated with statistics that will be ADDED.
  ####################################################################



================================================
FILE: configs/default/components/models/multi_modal_reasoning/self_attention.yml
================================================
# This file defines the default values for the Self_Attention model.

####################################################################
# 1. CONFIGURATION PARAMETERS that will be LOADED by the component.
####################################################################

# Dropout rate (LOADED)
# Default: 0 (means that it is turned off)
dropout_rate: 0

# Size of the latent space (LOADED)
latent_size: 256

# Number of attention heads (LOADED)
num_attention_heads: 4


streams:
  ####################################################################
  # 2. Keymappings associated with INPUT and OUTPUT streams.
  ####################################################################

  # Stream containing batch of encoded questions (INPUT)
  question_encodings: question_encodings

  # Stream containing outputs (OUTPUT)
  outputs: outputs

globals:
  ####################################################################
  # 3. Keymappings of variables that will be RETRIEVED from GLOBALS.
  ####################################################################

  # Size of the question encodings input (RETRIEVED)
  question_encoding_size: question_encoding_size

  # Size of the output (RETRIEVED)
  output_size: output_size

  ####################################################################
  # 4. Keymappings associated with GLOBAL variables that will be SET.
  ####################################################################

  ####################################################################
  # 5. Keymappings associated with statistics that will be ADDED.
  ####################################################################


================================================
FILE: configs/default/components/models/vision/convnet_encoder.yml
================================================
# This file defines the default values for the simple 3-layer ConvNet model.

####################################################################
# 1. CONFIGURATION PARAMETERS that will be LOADED by the component.
####################################################################

# Parameters defining the first convolutional layer (LOADED)
conv1:
    out_channels: 64
    kernel_size: 3
    stride: 1
    padding: 0

# Parameters defining the first max-pooling layer (LOADED)
maxpool1:
    kernel_size: 2

# Parameters defining the second convolutional layer (LOADED)
conv2:
    out_channels: 32
    kernel_size: 3
    stride: 1
    padding: 0

# Parameters defining the second max-pooling layer (LOADED)
maxpool2:
    kernel_size: 2

# Parameters defining the third convolutional layer (LOADED)
conv3:
    out_channels: 16
    kernel_size: 3
    stride: 1
    padding: 0

# Parameters defining the third max-pooling layer (LOADED)
maxpool3:
    kernel_size: 2

streams: 
  ####################################################################
  # 2. Keymappings associated with INPUT and OUTPUT streams.
  ####################################################################

  # Stream containing batch of inputs (INPUT)
  inputs: inputs

  # Stream containing batch of feature maps (OUTPUT)
  feature_maps: feature_maps

globals:
  ####################################################################
  # 3. Keymappings of variables that will be RETRIEVED from GLOBALS.
  ####################################################################

  # Height dimension of the input (RETRIEVED)
  input_height: image_height

  # Width dimension of the image (RETRIEVED)
  input_width: image_width

  # Depth dimension of the image (RETRIEVED)
  input_depth: image_depth

  ####################################################################
  # 4. Keymappings associated with GLOBAL variables that will be SET.
  ####################################################################

  # Height of the feature map (SET)
  feature_map_height: feature_map_height

  # Width of the feature map (SET)
  feature_map_width: feature_map_width

  # Depth of the feature map (SET)
  feature_map_depth: feature_map_depth

  ####################################################################
  # 5. Keymappings associated with statistics that will be ADDED.
  ####################################################################



================================================
FILE: configs/default/components/models/vision/generic_image_encoder.yml
================================================
# This file defines the default values for the component wrapping (pretrained) Torch Vision models.

####################################################################
# 1. CONFIGURATION PARAMETERS that will be LOADED by the component.
####################################################################

# Model type (LOADED)
# Options: vgg16 | densenet121 | resnet152 | resnet50
model_type: vgg16

# Parameter denoting whether the component will return (flat) prediction
# or output of last feature layer (LOADED)
return_feature_maps: False

# Load weights of a model pretrained on ImageNet (LOADED)
pretrained: True

streams:
  ####################################################################
  # 2. Keymappings associated with INPUT and OUTPUT streams.
  ####################################################################

  # Stream containing batch of images (INPUT)
  inputs: inputs

  # Stream containing outputs (features or "predictions") (OUTPUT)
  outputs: outputs

globals:
  ####################################################################
  # 3. Keymappings of variables that will be RETRIEVED from GLOBALS.
  ####################################################################

  # Size of the output (RETRIEVED)
  # Used when return_feature_maps = False.
  output_size: output_size

  ####################################################################
  # 4. Keymappings associated with GLOBAL variables that will be SET.
  ####################################################################

  # Height of the returned features tensor (SET)
  # Used when return_feature_maps = True.
  feature_maps_height: feature_maps_height

  # Width of the returned features tensor (SET)
  # Used when return_feature_maps = True.
  feature_maps_width: feature_maps_width

  # Depth of the returned features tensor (SET)
  # Used when return_feature_maps = True.
  feature_maps_depth: feature_maps_depth

  ####################################################################
  # 5. Keymappings associated with statistics that will be ADDED.
  ####################################################################


================================================
FILE: configs/default/components/models/vision/lenet5.yml
================================================
# This file defines the default values for the LeNet5 model.

####################################################################
# 1. CONFIGURATION PARAMETERS that will be LOADED by the component.
####################################################################

streams: 
  ####################################################################
  # 2. Keymappings associated with INPUT and OUTPUT streams.
  ####################################################################

  # Stream containing batch of images (INPUT)
  inputs: inputs

  # Stream containing predictions (OUTPUT)
  predictions: predictions

globals:
  ####################################################################
  # 3. Keymappings of variables that will be RETRIEVED from GLOBALS.
  ####################################################################

  # Size of the prediction (RETRIEVED)
  prediction_size: prediction_size

  ####################################################################
  # 4. Keymappings associated with GLOBAL variables that will be SET.
  ####################################################################

  ####################################################################
  # 5. Keymappings associated with statistics that will be ADDED.
  ####################################################################



================================================
FILE: configs/default/components/publishers/global_variable_publisher.yml
================================================
# This file defines the default values for the Global Variable Publisher.

####################################################################
# 1. CONFIGURATION PARAMETERS that will be LOADED by the component.
####################################################################

# List of keys of variables that will be added to globals (LOADED)
# Can be either a list of strings or a single string with comma-separated values.
keys: ''

# List of values - must be a single value or a list (LOADED)
values: ''

streams: 
  ####################################################################
  # 2. Keymappings associated with INPUT and OUTPUT streams.
  ####################################################################

globals:
  ####################################################################
  # 3. Keymappings of variables that will be RETRIEVED from GLOBALS.
  ####################################################################

  ####################################################################
  # 4. Keymappings associated with GLOBAL variables that will be SET.
  ####################################################################

  ####################################################################
  # 5. Keymappings associated with statistics that will be ADDED.
  ####################################################################



================================================
FILE: configs/default/components/publishers/stream_file_exporter.yml
================================================
# This file defines the default values for the Stream File Exporter.

####################################################################
# 1. CONFIGURATION PARAMETERS that will be LOADED by the component.
####################################################################

# List of names of streams that will be displayed (LOADED)
# Can be a string with a single name or a comma-separated string with a list of names
input_streams: ''

# Separator that will be placed between values (LOADED)
separator: ','

# Adds additional line to output file enabling Excel to use different separator while loading (LOADED)
export_separator_line_to_csv: False

# Adds additional line to output with header (LOADED)
export_header_to_csv: False

# Name of the file containing output values (LOADED)
filename: 'outputs.txt'

streams: 
  ####################################################################
  # 2. Keymappings associated with INPUT and OUTPUT streams.
  ####################################################################

globals:
  ####################################################################
  # 3. Keymappings of variables that will be RETRIEVED from GLOBALS.
  ####################################################################

  ####################################################################
  # 4. Keymappings associated with GLOBAL variables that will be SET.
  ####################################################################

  ####################################################################
  # 5. Keymappings associated with statistics that will be ADDED.
  ####################################################################



================================================
FILE: configs/default/components/statistics/accuracy_statistics.yml
================================================
# This file defines the default values for the Accuracy statistics.

####################################################################
# 1. CONFIGURATION PARAMETERS that will be LOADED by the component.
####################################################################

# Flag indicating whether predictions are represented as distributions or indices (LOADED)
# Options: True (expects a distribution for each prediction)
#          False (expects indices (max args))
use_prediction_distributions: True

# When set to True, performs masking of selected samples from batch (LOADED)
use_masking: False

streams: 
  ####################################################################
  # 2. Keymappings associated with INPUT and OUTPUT streams.
  ####################################################################

  # Stream containing targets (label ids) (INPUT)
  targets: targets

  # Stream containing batch of predictions (INPUT)
  predictions: predictions

  # Stream containing masks used for masking of selected samples from batch (INPUT)
  masks: masks

globals:
  ####################################################################
  # 3. Keymappings of variables that will be RETRIEVED from GLOBALS.
  ####################################################################

  ####################################################################
  # 4. Keymappings associated with GLOBAL variables that will be SET.
  ####################################################################

statistics:
  ####################################################################
  # 5. Keymappings associated with statistics that will be ADDED.
  ####################################################################

  # Name used for collected statistics (ADDED).
  accuracy: accuracy




================================================
FILE: configs/default/components/statistics/batch_size_statistics.yml
================================================
# This file defines the default values for the Batch size statistics.

####################################################################
# 1. CONFIGURATION PARAMETERS that will be LOADED by the component.
####################################################################

streams: 
  ####################################################################
  # 2. Keymappings associated with INPUT and OUTPUT streams.
  ####################################################################

  # Stream containing batch of indices (INPUT)
  indices: indices

globals:
  ####################################################################
  # 3. Keymappings of variables that will be RETRIEVED from GLOBALS.
  ####################################################################

  ####################################################################
  # 4. Keymappings associated with GLOBAL variables that will be SET.
  ####################################################################

  ####################################################################
  # 5. Keymappings associated with statistics that will be ADDED.
  ####################################################################



================================================
FILE: configs/default/components/statistics/bleu_statistics.yml
================================================
# This file defines the default values for the BLEU statistics.

####################################################################
# 1. CONFIGURATION PARAMETERS that will be LOADED by the component.
####################################################################

# Flag indicating whether predictions are represented as distributions or indices (LOADED)
# Options: True (expects a distribution for each prediction)
#          False (expects indices (max args))
use_prediction_distributions: True

# When set to True, performs masking of selected samples from batch (LOADED)
# TODO!
#use_masking: False

# Ignored words - useful for ignoring special tokens
ignored_words: ["<PAD>", "<EOS>"]

# Weights of n-grams used when calculating the score.
weights: [0.25, 0.25, 0.25, 0.25]

streams: 
  ####################################################################
  # 2. Keymappings associated with INPUT and OUTPUT streams.
  ####################################################################

  # Stream containing targets (label ids) (INPUT)
  targets: targets

  # Stream containing batch of predictions (INPUT)
  predictions: predictions

  # Stream containing masks used for masking of selected samples from batch (INPUT)
  #masks: masks

globals:
  ####################################################################
  # 3. Keymappings of variables that will be RETRIEVED from GLOBALS.
  ####################################################################

  # Word mappings used for mapping of predictions/targets into lists of words (RETRIEVED)
  word_mappings: word_mappings

  ####################################################################
  # 4. Keymappings associated with GLOBAL variables that will be SET.
  ####################################################################

statistics:
  ####################################################################
  # 5. Keymappings associated with statistics that will be ADDED.
  ####################################################################

  # Name used for collected statistics (ADDED).
  bleu: bleu




================================================
FILE: configs/default/components/statistics/precision_recall_statistics.yml
================================================
# This file defines the default values for the PrecisionRecall statistics.

####################################################################
# 1. CONFIGURATION PARAMETERS that will be LOADED by the component.
####################################################################

# Flag indicating whether predictions are represented as distributions or indices (LOADED)
# Options: True (expects a distribution for each prediction)
#          False (expects indices (max args))
use_prediction_distributions: True

# Flag indicating whether confusion matrix will be shown (LOADED)
show_confusion_matrix: False

# Flag indicating whether detailed scores for each class will be shown (LOADED)
show_class_scores: False

# When set to true, will use the provided word mappings as labels (LOADED)
use_word_mappings: False

# When set to True, performs masking of selected samples from batch (LOADED)
use_masking: False

streams: 
  ####################################################################
  # 2. Keymappings associated with INPUT and OUTPUT streams.
  ####################################################################

  # Stream containing targets (label ids) (INPUT)
  targets: targets

  # Stream containing batch of predictions (INPUT)
  predictions: predictions

  # Stream containing masks used for masking of selected samples from batch (INPUT)
  masks: masks

globals:
  ####################################################################
  # 3. Keymappings of variables that will be RETRIEVED from GLOBALS.
  ####################################################################

  # Word mappings (optionally) used as labels (RETRIEVED)
  word_mappings: word_mappings

  ####################################################################
  # 4. Keymappings associated with GLOBAL variables that will be SET.
  ####################################################################

statistics:
  ####################################################################
  # 5. Keymappings associated with statistics that will be ADDED.
  ####################################################################

  # Names used for collected statistics (ADDED).
  precision: precision
  recall: recall
  f1score: f1score




================================================
FILE: configs/default/components/tasks/image_text_to_class/clevr.yml
================================================
# This file defines the default values for the CLEVR task.

####################################################################
# 1. CONFIGURATION PARAMETERS that will be LOADED by the component.
####################################################################

# Folder where task will store data (LOADED)
data_folder: '~/data/CLEVR_v1.0'

# Defines the set (split) that will be used (LOADED)
# Options: training | validation | test | cogent_a_training | cogent_a_validation | cogent_b_validation
split: training

# Flag indicating whether the task will load and return images (LOADED)
stream_images: True

# Resize parameter (LOADED)
# When present, resizes the images from original size to [height, width]
# Depth remains set to 3.
#resize_image: [height, width]

# Select applied image preprocessing/augmentations (LOADED)
# Use one (or more) of the affine transformations:
# none | normalize | all
# Accepted formats: a,b,c or [a,b,c]
image_preprocessing: none

streams:
  ####################################################################
  # 2. Keymappings associated with INPUT and OUTPUT streams.
  ####################################################################

  # Stream containing batch of indices (OUTPUT)
  # Every task MUST return that stream.
  indices: indices

  # Stream containing batch of images (OUTPUT)
  images: images

  # Stream containing batch of image names (OUTPUT)
  image_ids: image_ids

  # Stream containing batch of questions (OUTPUT)
  questions: questions

  # Stream containing targets - answers (OUTPUT)
  answers: answers

  # Stream containing scene descriptions (OUTPUT)
  #answers: scene_graphs

  # Stream containing batch with question type - indices (OUTPUT)
  category_ids: question_type_ids

  # Stream containing batch with question type - names (OUTPUT)
  category_names: question_type_names

globals:
  ####################################################################
  # 3. Keymappings of variables that will be RETRIEVED from GLOBALS.
  ####################################################################

  ####################################################################
  # 4. Keymappings associated with GLOBAL variables that will be SET.
  ####################################################################

  # Width of the image (SET)
  input_width: image_width
  # Height of the image (SET)
  input_height: image_height
  # Depth of the image (SET)
  input_depth: image_depth

  # Question type (word-idx) mappings (SET)
  question_type_word_mappings: question_type_word_mappings

  ####################################################################
  # 5. Keymappings associated with statistics that will be ADDED.
  ####################################################################


================================================
FILE: configs/default/components/tasks/image_text_to_class/gqa.yml
================================================
# This file defines the default values for the GQA task.

####################################################################
# 1. CONFIGURATION PARAMETERS that will be LOADED by the component.
####################################################################

# Folder where task will store data (LOADED)
data_folder: '~/data/gqa'

# Defines the set (split) that will be used (LOADED)
# Options: training_0 | training | validation | test_dev | test | challenge | submission (?)
# Note: test_dev should be used for validation.
split: training_0

# Flag indicating whether the task will load and return images (LOADED)
stream_images: True

# Resize parameter (LOADED)
# When present, resizes the images from original size to [height, width]
# Depth remains set to 3.
resize_image: [224, 224]

# Select applied image preprocessing/augmentations (LOADED)
# Use one (or more) of the affine transformations:
# none | normalize | all
# Accepted formats: a,b,c or [a,b,c]
image_preprocessing: none

streams:
  ####################################################################
  # 2. Keymappings associated with INPUT and OUTPUT streams.
  ####################################################################

  # Stream containing batch of indices (OUTPUT)
  # Every task MUST return that stream.
  indices: indices

  # Stream containing batch of sample (original) identifiers (OUTPUT)
  sample_ids: sample_ids

  # Stream containing batch of images (OUTPUT)
  images: images

  # Stream containing batch of image names (OUTPUT)
  image_ids: image_ids

  # Stream containing batch of questions (OUTPUT)
  questions: questions

  # Stream containing targets answers (labels) (OUTPUT)
  answers: answers

  # Stream containing targets answers consisting of many words (OUTPUT)
  full_answers: full_answers

  # Stream containing scene descriptions (OUTPUT)
  #answers: scene_graphs

globals:
  ####################################################################
  # 3. Keymappings of variables that will be RETRIEVED from GLOBALS.
  ####################################################################

  ####################################################################
  # 4. Keymappings associated with GLOBAL variables that will be SET.
  ####################################################################

  # Width of the image (SET)
  input_width: image_width
  # Height of the image (SET)
  input_height: image_height
  # Depth of the image (SET)
  input_depth: image_depth

  ####################################################################
  # 5. Keymappings associated with statistics that will be ADDED.
  ####################################################################


================================================
FILE: configs/default/components/tasks/image_text_to_class/vqa_med_2019.yml
================================================
# This file defines the default values for the VQAMED2019 task.

####################################################################
# 1. CONFIGURATION PARAMETERS that will be LOADED by the component.
####################################################################

# Folder where task will store data (LOADED)
data_folder: '~/data/vqa-med'

# Defines the set (split) that will be used (LOADED)
# Options: training | validation | training_validation | test_answers | test
split: training

# Defines the categories that will be used (LOADED)
# Options: all | c1 | c2 | c3 | c4 (or any combination of the latter 4)
categories: all

# Flag indicating whether the task will load and return images (LOADED)
stream_images: True

# Flag indicating whether images will be preloaded (i.e. loaded once at start) (LOADED)
# WARNING: if this option is active, the images will also be "preprocessed" at start.
# This means that preloading should not be used when one needs to use the random augmentations!
preload_images: False

# Resize parameter (LOADED)
# When present, resizes the images from original size to [height, width]
# Depth remains set to 3.
#resize_image: [height, width]

# Scale parameter [height, width] (LOADED)
# Task will use those values to rescale the image_sizes to range (0, 1).
scale_image_size: [2414, 2323]

# Select applied image preprocessing/augmentations (LOADED)
# Use one (or more) of the affine transformations:
# none | random_affine | random_horizontal_flip | normalize | all
# Accepted formats: a,b,c or [a,b,c]
image_preprocessing: normalize

# Select applied question preprocessing/augmentations (LOADED)
# Use one (or more) of the transformations:
# none | lowercase | remove_punctuation | tokenize | random_remove_stop_words | random_shuffle_words | all
# Accepted formats: a,b,c or [a,b,c]
question_preprocessing: lowercase, remove_punctuation

# Select applied question preprocessing (LOADED)
# Use one (or more) of the transformations:
# none | lowercase | remove_punctuation | tokenize | all
# Accepted formats: a,b,c or [a,b,c]
answer_preprocessing: none

# When filename is not empty, task will calculate weights associated with all samples
# by looking at the distribution of all answers from all loaded samples (LOADED)
# Those weights can be next used by weighted samplers (e.g. kFoldWeightedSampler)
export_sample_weights: ''

# Shuffle the indices of the input (source) files/samples.
# Leaving that to false will result in the original order of files/samples,
# i.e. C1, then C2, then C3 etc.
shuffle_indices: False

# Generate and export (potentially shuffled) indices (LOADED)
# If not empty, will:
#  * shuffle indices of all samples and export them to a file.
#  * use those indices during sampling.
export_indices: ''

# Import (potentially shuffled) indices (LOADED)
# If not empty, will:
#  * import them from a file.
#  * use those indices during sampling.
import_indices: ''

streams:
  ####################################################################
  # 2. Keymappings associated with INPUT and OUTPUT streams.
  ####################################################################

  # Stream containing batch of indices (OUTPUT)
  # Every task MUST return that stream.
  indices: indices

  # Stream containing batch of images (OUTPUT)
  images: images

  # Stream containing batch of image names (OUTPUT)
  image_ids: image_ids

  # Stream containing batch with original sizes of images (OUTPUT)
  image_sizes: image_sizes

  # Stream containing batch of questions (OUTPUT)
  questions: questions

  # Stream containing targets - answers (OUTPUT)
  answers: answers

  # Stream containing batch with question categories - indices (OUTPUT)
  category_ids: category_ids

  # Stream containing batch with question categories - names (OUTPUT)
  category_names: category_names

globals:
  ####################################################################
  # 3. Keymappings of variables that will be RETRIEVED from GLOBALS.
  ####################################################################

  ####################################################################
  # 4. Keymappings associated with GLOBAL variables that will be SET.
  ####################################################################

  # Width of the image (SET)
  input_width: image_width
  # Height of the image (SET)
  input_height: image_height
  # Depth of the image (SET)
  input_depth: image_depth
  # Category (word-idx) mappings (SET)
  category_word_mappings: category_word_mappings

  ####################################################################
  # 5. Keymappings associated with statistics that will be ADDED.
  ####################################################################


================================================
FILE: configs/default/components/tasks/image_to_class/cifar_100.yml
================================================
# This file defines the default values for the CIFAR-100 task.

####################################################################
# 1. CONFIGURATION PARAMETERS that will be LOADED by the component.
####################################################################

# Folder where task will store data (LOADED)
data_folder: '~/data/cifar-100'

# Defines the set that will be used (LOADED)
# True: training set | False: test set.
use_train_data: True

# Optional parameter (LOADED)
# When present, resizes the CIFAR images from [32,32] to [height, width]
#resize_image: [height, width]

streams:
  ####################################################################
  # 2. Keymappings associated with INPUT and OUTPUT streams.
  ####################################################################

  # Stream containing batch of indices (OUTPUT)
  # Every task MUST return that stream.
  indices: indices

  # Stream containing batch of images (OUTPUT)
  images: images

  # Streams containing targets (label ids) (OUTPUT)
  coarse_targets: coarse_targets
  fine_targets: fine_targets

  # Streams containing labels (words) (OUTPUT)
  coarse_labels: coarse_labels
  fine_labels: fine_labels

globals:
  ####################################################################
  # 3. Keymappings of variables that will be RETRIEVED from GLOBALS.
  ####################################################################

  ####################################################################
  # 4. Keymappings associated with GLOBAL variables that will be SET.
  ####################################################################

  # Width of the image (SET)
  input_width: image_width
  # Height of the image (SET)
  input_height: image_height
  # Depth of the image (SET)
  input_depth: image_depth
  
  # Numbers of output classes (SET)
  coarse_num_classes: coarse_num_classes
  fine_num_classes: fine_num_classes

  # Labels (word-idx) mappings (SET)
  coarse_label_word_mappings: coarse_label_word_mappings
  fine_label_word_mappings: fine_label_word_mappings

  ####################################################################
  # 5. Keymappings associated with statistics that will be ADDED.
  ####################################################################



================================================
FILE: configs/default/components/tasks/image_to_class/mnist.yml
================================================
# This file defines the default values for the MNIST task.

####################################################################
# 1. CONFIGURATION PARAMETERS that will be LOADED by the component.
####################################################################

# Folder where task will store data (LOADED)
data_folder: '~/data/mnist'

# Defines the set that will be used (LOADED)
# True: training set | False: test set.
use_train_data: True

# Optional parameter (LOADED)
# When present, resizes the MNIST images from [28,28] to [height, width]
#resize_image: [height, width]

streams:
  ####################################################################
  # 2. Keymappings associated with INPUT and OUTPUT streams.
  ####################################################################

  # Stream containing batch of indices (OUTPUT)
  # Every task MUST return that stream.
  indices: indices

  # Stream containing batch of images (OUTPUT)
  inputs: inputs

  # Stream containing targets (label ids) (OUTPUT)
  targets: targets

  # Stream containing labels (words) (OUTPUT)
  labels: labels

globals:
  ####################################################################
  # 3. Keymappings of variables that will be RETRIEVED from GLOBALS.
  ####################################################################

  ####################################################################
  # 4. Keymappings associated with GLOBAL variables that will be SET.
  ####################################################################

  # Width of the image (SET)
  input_width: image_width
  # Height of the image (SET)
  input_height: image_height
  # Depth of the image (SET)
  input_depth: image_depth
  
  # Number of output classes: 10 (SET)
  num_classes: num_classes
  # Label (word-idx) mappings (SET)
  label_word_mappings: label_word_mappings

  ####################################################################
  # 5. Keymappings associated with statistics that will be ADDED.
  ####################################################################



================================================
FILE: configs/default/components/tasks/image_to_class/simple_molecules.yml
================================================
# This file defines the default values for the Simple Molecules task.

####################################################################
# 1. CONFIGURATION PARAMETERS that will be LOADED by the component.
####################################################################

# Folder where task will store data (LOADED)
data_folder: '~/data/simple-molecules'

# Defines the split that will be used (LOADED)
# Options: training | validation | test | test_mirror_blur | test_on_grid | test_handwritten
split: training

# Optional parameter (LOADED)
# When present, resizes the images to [height, width]
#resize_image: [height, width]

# Depth of the returned image (LOADED)
# Options: 1 | 3 
image_depth: 1

streams:
  ####################################################################
  # 2. Keymappings associated with INPUT and OUTPUT streams.
  ####################################################################

  # Stream containing batch of indices (OUTPUT)
  # Every task MUST return that stream.
  indices: indices

  # Stream containing batch of images (OUTPUT)
  images: images

  # Stream containing targets (label ids) (OUTPUT)
  targets: targets

  # Stream containing labels (words) (OUTPUT)
  labels: labels

globals:
  ####################################################################
  # 3. Keymappings of variables that will be RETRIEVED from GLOBALS.
  ####################################################################

  ####################################################################
  # 4. Keymappings associated with GLOBAL variables that will be SET.
  ####################################################################

  # Width of the image (SET)
  input_width: image_width
  # Height of the image (SET)
  input_height: image_height
  # Depth of the image (SET)
  input_depth: image_depth
  
  # Number of output classes: 10 (SET)
  num_classes: num_classes
  # Label (word-idx) mappings (SET)
  label_word_mappings: label_word_mappings

  ####################################################################
  # 5. Keymappings associated with statistics that will be ADDED.
  ####################################################################



================================================
FILE: configs/default/components/tasks/text_to_class/dummy_language_identification.yml
================================================
# This file defines the default values for the dummy language identification task.

####################################################################
# 1. CONFIGURATION PARAMETERS that will be LOADED by the component.
####################################################################

# Folder where task will store data (LOADED)
data_folder: '~/data/language_identification/dummy'

# Defines the set that will be used (LOADED)
# True: training set | False: test set.
use_train_data: True

# If set, task will always (re)generate data (LOADED)
regenerate: False

streams: 
  ####################################################################
  # 2. Keymappings associated with INPUT and OUTPUT streams.
  ####################################################################

  # Stream containing batch of indices (OUTPUT)
  # Every task MUST return that stream.
  indices: indices

  # Stream containing batch of images (OUTPUT)
  inputs: inputs

  # Stream containing targets (label ids) (OUTPUT)
  targets: targets

globals:
  ####################################################################
  # 3. Keymappings of variables that will be RETRIEVED from GLOBALS.
  ####################################################################

  ####################################################################
  # 4. Keymappings associated with GLOBAL variables that will be SET.
  ####################################################################

  ####################################################################
  # 5. Keymappings associated with statistics that will be ADDED.
  ####################################################################



================================================
FILE: configs/default/components/tasks/text_to_class/wily_language_identification.yml
================================================
# This file defines the default values for the WiLY language identification task.

####################################################################
# 1. CONFIGURATION PARAMETERS that will be LOADED by the component.
####################################################################

# Folder where task will store data (LOADED)
data_folder: '~/data/language_identification/wily'

# Defines the set that will be used (LOADED)
# True: training set | False: test set.
use_train_data: True

# If set, task will always (re)generate data (LOADED)
regenerate: False

streams: 
  ####################################################################
  # 2. Keymappings associated with INPUT and OUTPUT streams.
  ####################################################################

  # Stream containing batch of indices (OUTPUT)
  # Every task MUST return that stream.
  indices: indices

  # Stream containing batch of images (OUTPUT)
  inputs: inputs

  # Stream containing targets (label ids) (OUTPUT)
  targets: targets

globals:
  ####################################################################
  # 3. Keymappings of variables that will be RETRIEVED from GLOBALS.
  ####################################################################

  ####################################################################
  # 4. Keymappings associated with GLOBAL variables that will be SET.
  ####################################################################

  ####################################################################
  # 5. Keymappings associated with statistics that will be ADDED.
  ####################################################################



================================================
FILE: configs/default/components/tasks/text_to_class/wily_ngram_language_modeling.yml
================================================
# This file defines the default values for the ngram language modeling
# using WiLY dataset.

####################################################################
# 1. CONFIGURATION PARAMETERS that will be LOADED by the component.
####################################################################

# Folder where task will store data (LOADED)
data_folder: '~/data/language_identification/wily'

# Defines the set that will be used (LOADED)
# True: training set | False: test set.
use_train_data: True

# Size of the context (LOADED)
context: 2

streams: 
  ####################################################################
  # 2. Keymappings associated with INPUT and OUTPUT streams.
  ####################################################################

  # Stream containing batch of indices (OUTPUT)
  # Every task MUST return that stream.
  indices: indices

  # Stream containing batch of images (OUTPUT)
  inputs: inputs

  # Stream containing targets (label ids) (OUTPUT)
  targets: targets

globals:
  ####################################################################
  # 3. Keymappings of variables that will be RETRIEVED from GLOBALS.
  ####################################################################

  ####################################################################
  # 4. Keymappings associated with GLOBAL variables that will be SET.
  ####################################################################

  ####################################################################
  # 5. Keymappings associated with statistics that will be ADDED.
  ####################################################################



================================================
FILE: configs/default/components/tasks/text_to_text/translation_pairs.yml
================================================
# This file defines the default values for the translation pairs task.

####################################################################
# 1. CONFIGURATION PARAMETERS that will be LOADED by the component.
####################################################################

# Folder where task will store data (LOADED)
data_folder: ~/data/language_modeling/translation_pairs

# Defines the dataset that will be used (LOADED)
# Options: eng-fra, eng-pol
dataset: eng-fra

# Defines the used subset (LOADED)
# Options: train | valid | test
subset: train

# Length limit of source and target sentence
# if < 0, no limit
sentence_length: 10

streams: 
  ####################################################################
  # 2. Keymappings associated with INPUT and OUTPUT streams.
  ####################################################################

  # Stream containing batch of indices (OUTPUT)
  # Every task MUST return that stream.
  indices: indices

  # Stream containing batch of tokenized source sentences (OUTPUT)
  sources: sources

  # Stream containing batch of tokenized target sentences (OUTPUT)
  targets: targets

globals:
  ####################################################################
  # 3. Keymappings of variables that will be RETRIEVED from GLOBALS.
  ####################################################################

  ####################################################################
  # 4. Keymappings associated with GLOBAL variables that will be SET.
  ####################################################################

  ####################################################################
  # 5. Keymappings associated with statistics that will be ADDED.
  ####################################################################



================================================
FILE: configs/default/components/tasks/text_to_text/wikitext_language_modeling.yml
================================================
# This file defines the default values for the WikiText language modeling.

####################################################################
# 1. CONFIGURATION PARAMETERS that will be LOADED by the component.
####################################################################

# Folder where task will store data (LOADED)
data_folder: ~/data/language_modeling/wikitext

# Defines the dataset that will be used (LOADED)
# Options: wikitext-2 | wikitext-103
dataset: wikitext-2

# Defines the used subset (LOADED)
# Options: train | valid | test
subset: train

# Length of sentence (i.e. number of tokens in input and target sentences)
sentence_length: 50

streams: 
  ####################################################################
  # 2. Keymappings associated with INPUT and OUTPUT streams.
  ####################################################################

  # Stream containing batch of indices (OUTPUT)
  # Every task MUST return that stream.
  indices: indices

  # Stream containing batch of tokenized source sentences (OUTPUT)
  sources: sources

  # Stream containing batch of tokenized target sentences (OUTPUT)
  targets: targets

globals:
  ####################################################################
  # 3. Keymappings of variables that will be RETRIEVED from GLOBALS.
  ####################################################################

  ####################################################################
  # 4. Keymappings associated with GLOBAL variables that will be SET.
  ####################################################################

  ####################################################################
  # 5. Keymappings associated with statistics that will be ADDED.
  ####################################################################



================================================
FILE: configs/default/components/transforms/concatenate_tensor.yml
================================================
# This file defines the default values for the ConcatenateTensor.

####################################################################
# 1. CONFIGURATION PARAMETERS that will be LOADED by the component.
####################################################################

# List of names of streams that will be concatenated (LOADED)
# Can be a string with a single name or a comma-separated string with a list of names
input_streams: ''

# Dimensions of the input tensors (LOADED)
# Specific to a task/model, must be set by user (no DEFAULT)
# List of lists.
# input_dims: [[BATCH x ...], ...]

# Dimensions of the input tensor (LOADED)
# Specific to a task/model, must be set by user (no DEFAULT)
# output_dims: [BATCH x ...]

# Dimension along which tensors will be concatenated (LOADED)
dim: 1

streams: 
  ####################################################################
  # 2. Keymappings associated with INPUT and OUTPUT streams.
  ####################################################################

  # Stream containing output tensor (OUTPUT)
  outputs: outputs

globals:
  ####################################################################
  # 3. Keymappings of variables that will be RETRIEVED from GLOBALS.
  ####################################################################

  ####################################################################
  # 4. Keymappings associated with GLOBAL variables that will be SET.
  ####################################################################

  # Size of the output tensor (SET)
  # (all dimensions except batch_size)
  output_size: output_size

  ####################################################################
  # 5. Keymappings associated with statistics that will be ADDED.
  ####################################################################



================================================
FILE: configs/default/components/transforms/list_to_tensor.yml
================================================
# This file defines the default values for the List to Tensor transformation.

####################################################################
# 1. CONFIGURATION PARAMETERS that will be LOADED by the component.
####################################################################

# Number of input dimensions, including last tensor (LOADED)
num_inputs_dims: 1

streams: 
  ####################################################################
  # 2. Keymappings associated with INPUT and OUTPUT streams.
  ####################################################################

  # Stream containing input tensor (INPUT)
  inputs: inputs

  # Stream containing output tensor (OUTPUT)
  outputs: outputs

globals:
  ####################################################################
  # 3. Keymappings of variables that will be RETRIEVED from GLOBALS.
  ####################################################################

  ####################################################################
  # 4. Keymappings associated with GLOBAL variables that will be SET.
  ####################################################################

  # Size of the input item (GET)
  # (last dimension)
  input_size: input_size

  ####################################################################
  # 5. Keymappings associated with statistics that will be ADDED.
  ####################################################################



================================================
FILE: configs/default/components/transforms/non_linearity.yml
================================================
# This file defines the default values for the NonLinearity.

####################################################################
# 1. CONFIGURATION PARAMETERS that will be LOADED by the component.
####################################################################

# Dimensions of the input tensor (LOADED)
# Specific to a task/model, must be set by user (no DEFAULT)
# input_dims: [BATCH x ....]

# Dimensions of the input tensor (LOADED)
# Specific to a task/model, must be set by user (no DEFAULT)
# output_dims: [BATCH x ....]

streams: 
  ####################################################################
  # 2. Keymappings associated with INPUT and OUTPUT streams.
  ####################################################################

  # Stream containing input tensor (INPUT)
  inputs: inputs

  # Stream containing output tensor (OUTPUT)
  outputs: outputs

globals:
  ####################################################################
  # 3. Keymappings of variables that will be RETRIEVED from GLOBALS.
  ####################################################################

  ####################################################################
  # 4. Keymappings associated with GLOBAL variables that will be SET.
  ####################################################################

  # Size of the output tensor (SET)
  # (all dimensions except batch_size)
  output_size: output_size

  ####################################################################
  # 5. Keymappings associated with statistics that will be ADDED.
  ####################################################################



================================================
FILE: configs/default/components/transforms/reduce_tensor.yml
================================================
# This file defines the default values for the ReduceTensor transformation.

####################################################################
# 1. CONFIGURATION PARAMETERS that will be LOADED by the component.
####################################################################

# Number of input dimensions, including batch (LOADED)
num_inputs_dims: 2

# Dimension along which the reduction will be applied (LOADED)
reduction_dim: 1

# Reduction type (LOADED)
# Options: sum | mean | min | max | argmin | argmax
reduction_type: sum

# If True, the output tensor is of the same size as input, except dim where it is of size 1 (LOADED)
keepdim: False

streams: 
  ####################################################################
  # 2. Keymappings associated with INPUT and OUTPUT streams.
  ####################################################################

  # Stream containing input tensor (INPUT)
  inputs: inputs

  # Stream containing output tensor (OUTPUT)
  outputs: outputs

globals:
  ####################################################################
  # 3. Keymappings of variables that will be RETRIEVED from GLOBALS.
  ####################################################################

  # Size of the input item (GET)
  # (last dimension)
  input_size: input_size

  ####################################################################
  # 4. Keymappings associated with GLOBAL variables that will be SET.
  ####################################################################

  ####################################################################
  # 5. Keymappings associated with statistics that will be ADDED.
  ####################################################################



================================================
FILE: configs/default/components/transforms/reshape_tensor.yml
================================================
# This file defines the default values for the Tensor Reshaper.

####################################################################
# 1. CONFIGURATION PARAMETERS that will be LOADED by the component.
####################################################################

# Dimensions of the input tensor (LOADED)
# Specific to a task/model, must be set by user (no DEFAULT)
# input_dims: [BATCH x ....]

# Dimensions of the input tensor (LOADED)
# Specific to a task/model, must be set by user (no DEFAULT)
# output_dims: [BATCH x ....]

streams: 
  ####################################################################
  # 2. Keymappings associated with INPUT and OUTPUT streams.
  ####################################################################

  # Stream containing input tensor (INPUT)
  inputs: inputs

  # Stream containing output tensor (OUTPUT)
  outputs: outputs

globals:
  ####################################################################
  # 3. Keymappings of variables that will be RETRIEVED from GLOBALS.
  ####################################################################

  ####################################################################
  # 4. Keymappings associated with GLOBAL variables that will be SET.
  ####################################################################

  # Size of the output tensor (SET)
  # (all dimensions except batch_size)
  output_size: output_size

  ####################################################################
  # 5. Keymappings associated with statistics that will be ADDED.
  ####################################################################



================================================
FILE: configs/default/components/viewers/image_viewer.yml
================================================
# This file defines the default values for the ImageViewer.

####################################################################
# 1. CONFIGURATION PARAMETERS that will be LOADED by the component.
####################################################################

# Number of the sample that will be printed (LOADED)
# Default: -1 (means random)
sample_number: -1

streams: 
  ####################################################################
  # 2. Keymappings associated with INPUT and OUTPUT streams.
  ####################################################################

  # Stream containing images (INPUT)
  images: images

  # Stream containing target labels (strings) (INPUT)
  label: labels

  # Stream containing predicted labels (strings) (INPUT)
  answers: answers


globals:
  ####################################################################
  # 3. Keymappings of variables that will be RETRIEVED from GLOBALS.
  ####################################################################

  ####################################################################
  # 4. Keymappings associated with GLOBAL variables that will be SET.
  ####################################################################

  ####################################################################
  # 5. Keymappings associated with statistics that will be ADDED.
  ####################################################################



================================================
FILE: configs/default/components/viewers/stream_viewer.yml
================================================
# This file defines the default values for the Stream Viewer.

####################################################################
# 1. CONFIGURATION PARAMETERS that will be LOADED by the component.
####################################################################

# List of names of streams that will be displayed (LOADED)
# Can be a string with a single name or a comma-separated string with a list of names
input_streams: ''

# Number of the sample that will be displayed (LOADED)
# Default: -1 (means random)
sample_number: -1

streams: 
  ####################################################################
  # 2. Keymappings associated with INPUT and OUTPUT streams.
  ####################################################################

globals:
  ####################################################################
  # 3. Keymappings of variables that will be RETRIEVED from GLOBALS.
  ####################################################################

  ####################################################################
  # 4. Keymappings associated with GLOBAL variables that will be SET.
  ####################################################################

  ####################################################################
  # 5. Keymappings associated with statistics that will be ADDED.
  ####################################################################



================================================
FILE: configs/default/workers/offline_trainer.yml
================================================
####################################################################
# Section defining all the default values of parameters used during training when using ptp-offline-trainer.
# If you want to use different section for "training" pass its name as command line argument '--training_section_name' to trainer (DEFAULT: training)
# Note: the following parameters will be (anyway) used as default values.
default_training:
  # Set the random seeds: -1 means that they will be picked randomly.
  # Note: their final values will be stored in the final training_configuration.yml saved to log dir.
  seed_numpy: -1
  seed_torch: -1

  # Default batch size.
  batch_size: 64

  # Definition of the task (Mandatory!)
  #task:
  #  One must define its type (Mandatory!)
  #  type: ?
  #  The rest of the content of that section is task-specific...
  
  # Section describing curriculum learning (Optional)
  #curriculum_learning: 
  #  # Flag indicating whether curriculum learning has to finish before (eventual) termination of the training.
  #  must_finish: True
  #  The rest of the content of that section is task-specific...

  # Definition of optimizer (Mandatory!)
  #optimizer:
  #  # Type - generally all optimizers from PyTorch.optim are allowed (Mandatory!)
  #  type: Adam
  #  # Options: 
  #  lr: 0.0001
  #  The rest of the content of that section is optimizer-specific...

  # Set a default configuration section for data loader.
  dataloader:
    # Shuffle set by default.
    shuffle: True 
    batch_sampler: None
    # Do not use multiprocessing by default.
    num_workers: 0
    pin_memory: False
    # Do not drop last frame by default.
    drop_last: False
    timeout: 0

  # Definition of sampler (Optional)
  # When this section will not be present, worker will use "standard" sampling (please refer to shuffle in dataloader)
  #sampler:
  #  # Type - generally all samplers from PyTorch (plus some new ones) are allowed (Mandatory!)
  #  # Options: 
  #  type: RandomSampler
  #  The rest of the content of that section is sampler-specific...

  # Terminal conditions that will be used during training.
  # They can (and often should) be overwritten.
  terminal_conditions:
    # Terminal condition I: loss threshold, going below will terminate the training.
    loss_stop_threshold: 0.00001 # 1e-5
    # Terminal condition II: Early stopping monitors validation loss; if it didn't go down during the last n validations, training will be terminated (Optional, negative means that this condition is disabled)
    early_stop_validations: 10
    # Terminal condition III: maximal number of epochs (Mandatory for this trainer! Must be > 0)
    epoch_limit: 10
    # Terminal condition IV: maximal number of episodes (Optional, -1 (negative) means that this condition is disabled)
    episode_limit: -1



####################################################################
# Section defining all the default values of parameters used during validation.
# If you want to use different section for validation pass its name as command line argument '--validation_section_name' to trainer (DEFAULT: validation)
# Note: the following parameters will be (anyway) used as default values.
default_validation:
  # Defines how often the partial validation will be performed.
  # In this trainer Partial Validation is optional (negative value means it is disabled)
  partial_validation_interval: -1

  # Definition of the task (mandatory!)
  #task:
  #  One must define its type (Mandatory!)
  #  type: ?
  #  The rest of the content of that section is task-specific...

  # Set a default configuration section for data loader.
  dataloader:
    # Shuffle set by default.
    shuffle: True 
    # Do not use multiprocessing by default.
    num_workers: 0
    pin_memory: False
    # Do not drop last frame by default.
    drop_last: False
    timeout: 0

  # Definition of sampler (Optional)
  # When this section will not be present, worker will use "standard" sampling (please refer to shuffle in dataloader)
  #sampler:
  #  # Type - generally all samplers from PyTorch (plus some new ones) are allowed (Mandatory!)
  #  # Options: 
  #  type: RandomSampler
  #  The rest of the content of that section is sampler-specific...



####################################################################
# Section defining the pipeline (list of components).
# If you want to use a different section for the pipeline pass its name as command line argument '--pipeline_section_name' to trainer (DEFAULT: pipeline)
pipeline: 
  # Pipeline must contain at least one component.
  #name_1:
  #   Each component must have defined its priority... (Mandatory!)
  #   priority: 0.1 # Can be float. Smaller means higher priority, up to zero.
  #   # ... and type (Mandatory!)
  #   type: ?
  #   The rest of the content of that section is component-specific...




================================================
FILE: configs/default/workers/online_trainer.yml
================================================
####################################################################
# Section defining all the default values of parameters used during training when using ptp-online-trainer.
# If you want to use different section for "training" pass its name as command line argument '--training_section_name' to trainer (DEFAULT: training)
# Note: the following parameters will be (anyway) used as default values.
default_training:
  # Set the random seeds: -1 means that they will be picked randomly.
  # Note: their final values will be stored in the final training_configuration.yml saved to log dir.
  seed_numpy: -1
  seed_torch: -1

  # Default batch size.
  batch_size: 64

  # Definition of the task (Mandatory!)
  #task:
  #  One must define its type (Mandatory!)
  #  type: ?
  #  The rest of the content of that section is task-specific...
  
  # Section describing curriculum learning (Optional)
  #curriculum_learning: 
  #  # Flag indicating whether curriculum learning has to finish before (eventual) termination of the training.
  #  must_finish: True
  #  The rest of the content of that section is task-specific...

  # Definition of optimizer (Mandatory!)
  #optimizer:
  #  # Type - generally all optimizers from PyTorch.optim are allowed (Mandatory!)
  #  type: Adam
  #  # Options: 
  #  lr: 0.0001
  #  The rest of the content of that section is optimizer-specific...

  # Set a default configuration section for data loader.
  dataloader:
    # Shuffle set by default.
    shuffle: True 
    batch_sampler: None
    # Do not use multiprocessing by default.
    num_workers: 0
    pin_memory: False
    # Do not drop last frame by default.
    drop_last: False
    timeout: 0

  # Definition of sampler (Optional)
  # When this section will not be present, worker will use "standard" sampling (please refer to shuffle in dataloader)
  #sampler:
  #  # Type - generally all samplers from PyTorch (plus some new ones) are allowed (Mandatory!)
  #  # Options: 
  #  type: RandomSampler
  #  The rest of the content of that section is sampler-specific...

  # Terminal conditions that will be used during training.
  # They can (and often should) be overwritten.
  terminal_conditions:
    # Terminal condition I: loss threshold, going below will terminate the training.
    loss_stop_threshold: 0.00001 # 1e-5
    # Terminal condition II: Early stopping monitors validation loss; if it didn't go down during the last n validations, training will be terminated (Optional, negative means that this condition is disabled)
    early_stop_validations: 10
    # Terminal condition III: maximal number of epochs (Optional, -1 (negative) means that this condition is disabled)
    epoch_limit: -1
    # Terminal condition IV: maximal number of episodes (Mandatory for this trainer! Must be > 0)
    episode_limit: 100000



####################################################################
# Section defining all the default values of parameters used during validation.
# If you want to use different section for validation pass its name as command line argument '--validation_section_name' to trainer (DEFAULT: validation)
# Note: the following parameters will be (anyway) used as default values.
default_validation:
  # Defines how often the partial validation will be performed.
  # In this trainer Partial Validation is mandatory, hence interval must be > 0.
  partial_validation_interval: 100

  # Definition of the task (mandatory!)
  #task:
  #  One must define its type (Mandatory!)
  #  type: ?
  #  The rest of the content of that section is task-specific...

  # Set a default configuration section for data loader.
  dataloader:
    # Shuffle set by default.
    shuffle: True 
    # Do not use multiprocessing by default.
    num_workers: 0
    pin_memory: False
    # Do not drop last frame by default.
    drop_last: False
    timeout: 0

  # Definition of sampler (Optional)
  # When this section will not be present, worker will use "standard" sampling (please refer to shuffle in dataloader)
  #sampler:
  #  # Type - generally all samplers from PyTorch (plus some new ones) are allowed (Mandatory!)
  #  # Options: 
  #  type: RandomSampler
  #  The rest of the content of that section is sampler-specific...



####################################################################
# Section defining the pipeline (list of components).
# If you want to use a different section for the pipeline pass its name as command line argument '--pipeline_section_name' to trainer (DEFAULT: pipeline)
pipeline: 
  # Pipeline must contain at least one component.
  #name_1:
  #   Each component must have defined its priority... (Mandatory!)
  #   priority: 0.1 # Can be float. Smaller means higher priority, up to zero.
  #   # ... and type (Mandatory!)
  #   type: ?
  #   The rest of the content of that section is component-specific...




================================================
FILE: configs/default/workers/processor.yml
================================================
####################################################################
# Section defining all the default values of parameters used during testing.
# If you want to use different section during "processing" pass its name as command line argument '--section_name' to trainer (DEFAULT: test)
# Note: the following parameters will be (anyway) used as default values.
default_test:
  # Set the random seeds: -1 means that they will be picked randomly.
  seed_numpy: -1
  seed_torch: -1

  # Default batch size.
  batch_size: 64

  # Definition of the task (Mandatory!)
  #task:
  #  One must define its type (Mandatory!)
  #  type: ?
  #  The rest of the content of that section is task-specific...

  # Set a default configuration section for data loader.
  dataloader:
    # Shuffle set by default.
    shuffle: True 
    batch_sampler: None
    # Do not use multiprocessing by default.
    num_workers: 0
    pin_memory: False
    # Do not drop last frame by default.
    drop_last: False
    timeout: 0

  # Definition of sampler (Optional)
  # When this section will not be present, worker will use "standard" sampling (please refer to shuffle in dataloader)
  #sampler:
  #  # Type - generally all samplers from PyTorch (plus some new ones) are allowed (Mandatory!)
  #  # Options: 
  #  type: RandomSampler
  #  The rest of the content of that section is sampler-specific...

  # Terminal condition that will be used during processing.
  terminal_conditions:
    # Terminal condition : maximal number of episodes (Optional, -1 means that processor will perform one pass over the whole dataset/split)
    episode_limit: -1


####################################################################
# Section defining the pipeline (list of components).
# If you want to use a different section for the pipeline pass its name as command line argument '--pipeline_section_name' to trainer (DEFAULT: pipeline)
pipeline: 
  # Pipeline must contain at least one component.
  #name_1:
  #   Each component must have defined its priority... (Mandatory!)
  #   priority: 0.1 # Can be float. Smaller means higher priority, up to zero.
  #   # ... and type (Mandatory!)
  #   type: ?
  #   The rest of the content of that section is component-specific...


================================================
FILE: configs/mnist/default_mnist.yml
================================================
# Training parameters:
training:
  task: 
    type: MNIST
    batch_size: &b 64
    use_train_data: True
    #resize: [32, 32]
  # Use sampler that operates on a subset.
  sampler:
    type: SubsetRandomSampler
    indices: [0, 55000]
  # optimizer parameters:
  optimizer:
    type: Adam
    lr: 0.0001
  # settings parameters
  terminal_conditions:
    loss_stop_threshold: 0.05
    early_stop_validations: -1
    episode_limit: 10000
    epoch_limit: 10

# Validation parameters:
validation:
  #partial_validation_interval: 100
  task:
    type: MNIST
    batch_size: *b
    use_train_data: True  # True because we are splitting the training set to: validation and training
    #resize: [32, 32]
  # Use sampler that operates on a subset.
  sampler:
    type: SubsetRandomSampler
    indices: [55000, 60000]

# Testing parameters:
test:
  task:
    type: MNIST
    batch_size: *b
    use_train_data: False
    #resize: [32, 32]

pipeline:
  disable: image_viewer

  # Loss
  nllloss:
    type: NLLLoss
    priority: 10.0

  # Statistics.
  batch_size:
    priority: 100.0
    type: BatchSizeStatistics

  accuracy:
    priority: 100.1
    type: AccuracyStatistics


  precision_recall:
    priority: 100.2
    type: PrecisionRecallStatistics
    use_word_mappings: True
    show_class_scores: True
    globals:
      word_mappings: label_word_mappings

  answer_decoder:
    priority: 100.3
    type: WordDecoder
    import_word_mappings_from_globals: True
    globals:
      word_mappings: label_word_mappings
    streams:
      inputs: predictions
      outputs: answers

  stream_viewer:
    priority: 100.4
    type: StreamViewer
    input_streams: labels, answers

  image_viewer:
    priority: 100.5
    type: ImageViewer
    streams:
      images: inputs
      labels: labels
      answers: answers



================================================
FILE: configs/mnist/mnist_classification_convnet_softmax.yml
================================================
# Load config defining MNIST tasks for training, validation and testing.
default_configs: mnist/default_mnist.yml

pipeline:

  # Model consisting of two components.
  image_encoder:
    type: ConvNetEncoder
    priority: 1

  # Reshape inputs
  reshaper:
    type: ReshapeTensor
    input_dims: [-1, 16, 1, 1]
    output_dims: [-1, 16]
    priority: 2
    streams:
      inputs: feature_maps
      outputs: reshaped_maps
    globals:
      output_size: reshaped_maps_size

  # Image classifier.
  classifier:
    type: FeedForwardNetwork 
    priority: 3
    streams:
      inputs: reshaped_maps
    globals:
      input_size: reshaped_maps_size
      prediction_size: num_classes

#: pipeline


================================================
FILE: configs/mnist/mnist_classification_kfold_softmax.yml
================================================
# Load config defining MNIST tasks for training, validation and testing.
default_configs: mnist/default_mnist.yml

# Training parameters:
training:
  task: 
    type: MNIST
    batch_size: &b 64
    use_train_data: True
  # Use k-fold cross-validation random sampler.
  sampler:
    type: kFoldRandomSampler
    folds: 10 # Each with size of 6000
  # optimizer parameters:
  optimizer:
    type: Adam
    lr: 0.0001
  # settings parameters
  terminal_conditions:
    loss_stop_threshold: 0.5
    episode_limit: 10000
    epoch_limit: 5

# Validation parameters:
validation:
  partial_validation_interval: 500
  task:
    type: MNIST
    batch_size: *b
    use_train_data: True  # True because we are splitting the training set to: validation and training
  # Use k-fold cross-validation random sampler.
  sampler:
    type: kFoldRandomSampler
    folds: 10 # Each with size of 6000

pipeline:

  # Reshapes tensors.
  reshaper:
    type: ReshapeTensor
    input_dims: [-1, 1, 28, 28]
    output_dims: [-1, 784]
    priority: 1
    streams:
      outputs: reshaped_images
    globals:
      output_size: reshaped_image_size

  # Classifier.
  classifier:
    type: FeedForwardNetwork 
    priority: 2
    dropout_rate: 0.1
    hidden_sizes: [100, 100]
    streams:
      inputs: reshaped_images
    globals:
      input_size: reshaped_image_size
      prediction_size: num_classes

#: pipeline


================================================
FILE: configs/mnist/mnist_classification_lenet5.yml
================================================
# Load config defining MNIST tasks for training, validation and testing.
default_configs: mnist/default_mnist.yml

# Training parameters - overwrite defaults:
training:
  task: 
    resize_image: [32, 32]

# Validation parameters - overwrite defaults:
validation:
  task:
    resize_image: [32, 32]

# Testing parameters - overwrite defaults:
test:
  task:
    resize_image: [32, 32]

# Definition of the pipeline.
pipeline:

  # Image classifier.
  image_classifier:
    type: LeNet5
    priority: 1
    globals:
      prediction_size: num_classes

#: pipeline


================================================
FILE: configs/mnist/mnist_classification_softmax.yml
================================================
# Load config defining MNIST tasks for training, validation and testing.
default_configs: mnist/default_mnist.yml

pipeline:

  # Reshapes tensors.
  reshaper:
    type: ReshapeTensor
    input_dims: [-1, 1, 28, 28]
    output_dims: [-1, 784]
    priority: 1
    streams:
      outputs: reshaped_images
    globals:
      output_size: reshaped_image_size

  # Classifier.
  classifier:
    type: FeedForwardNetwork 
    priority: 2
    dropout_rate: 0.1
    hidden_sizes: [100, 100]
    streams:
      inputs: reshaped_images
    globals:
      input_size: reshaped_image_size
      prediction_size: num_classes

#: pipeline


================================================
FILE: configs/mnist/mnist_classification_vf_2lenet5_2losses.yml
================================================
# Load config defining MNIST tasks for training, validation and testing.
default_configs: mnist/default_mnist.yml

# Training parameters - overwrite defaults:
training:
  task: 
    resize_image: [32, 32]

# Validation parameters - overwrite defaults:
validation:
  task:
    resize_image: [32, 32]

# Testing parameters - overwrite defaults:
test:
  task:
    resize_image: [32, 32]

# Definition of the pipeline.
pipeline:

  # Disable components for "default" flow.
  disable: nllloss, accuracy, precision_recall, image_viewer

  # Add global variables.
  global_publisher:
    type: GlobalVariablePublisher
    priority: 0
    keys: [num_classes1, num_classes2, word_to_ix1, word_to_ix2]
    values: [3, 7, {"Zero": 0, "One": 1, "Two": 2}, {"Three": 0, "Four": 1, "Five": 2, "Six": 3, "Seven": 4, "Eight": 5, "Nine": 6}]

  ################# Flow 1 #################
  # Image classifier.
  flow1_image_classifier:
    type: LeNet5
    priority: 1.1
    globals:
      prediction_size: num_classes1
    streams:
      inputs: inputs
      predictions: flow1_predictions
      
  flow1_label_to_mask1:
    type: StringToMask
    priority: 1.2
    globals:
      word_mappings: word_to_ix1
    streams:
      strings: labels
      masks: flow1_masks

  flow1_label_to_target1:
    type: LabelIndexer
    priority: 1.3
    import_word_mappings_from_globals: True
    globals:
      word_mappings: word_to_ix1
    streams:
      inputs: labels
      outputs: flow1_targets

  # Masked loss.
  flow1_nllloss:
    type: NLLLoss
    priority: 1.4
    use_masking: True
    streams:
      targets: flow1_targets
      predictions: flow1_predictions
      masks: flow1_masks
      loss: flow1_loss

  # Statistics.
  flow1_accuracy:
    type: AccuracyStatistics
    priority: 1.51
    use_masking: True
    streams:
      predictions: flow1_predictions
      targets: flow1_targets
      masks: flow1_masks
    statistics:
      accuracy: flow1_accuracy

  flow1_precision_recall:
    type: PrecisionRecallStatistics
    priority: 1.52
    use_word_mappings: True
    show_class_scores: True
    show_confusion_matrix: True
    use_masking: True
    globals:
      word_mappings: word_to_ix1
      num_classes: num_classes1
    streams:
      targets: flow1_targets
      predictions: flow1_predictions
      masks: flow1_masks
    statistics:
      precision: flow1_precision
      recall: flow1_recall
      f1score: flow1_f1score

  ################# Flow 2 #################
  # Image classifier.
  flow2_image_classifier:
    type: LeNet5
    priority: 2.1
    globals:
      prediction_size: num_classes2
    streams:
      inputs: inputs
      predictions: flow2_predictions
      
  flow2_label_to_mask2:
    type: StringToMask
    priority: 2.2
    globals:
      word_mappings: word_to_ix2
    streams:
      strings: labels
      masks: flow2_masks

  flow2_label_to_target2:
    type: LabelIndexer
    priority: 2.3
    import_word_mappings_from_globals: True
    globals:
      word_mappings: word_to_ix2
    streams:
      inputs: labels
      outputs: flow2_targets

  # Masked loss.
  flow2_nllloss:
    type: NLLLoss
    priority: 2.4
    use_masking: True
    streams:
      targets: flow2_targets
      predictions: flow2_predictions
      masks: flow2_masks
      loss: flow2_loss

  # Statistics.
  flow2_accuracy:
    type: AccuracyStatistics
    priority: 2.41
    use_masking: True
    streams:
      targets: flow2_targets
      predictions: flow2_predictions
      masks: flow2_masks
    statistics:
      accuracy: flow2_accuracy

  flow2_precision_recall:
    type: PrecisionRecallStatistics
    priority: 2.42
    use_word_mappings: True
    show_class_scores: True
    show_confusion_matrix: True
    use_masking: True
    globals:
      word_mappings: word_to_ix2
      num_classes: num_classes2
    streams:
      targets: flow2_targets
      predictions: flow2_predictions
      masks: flow2_masks
    statistics:
      precision: flow2_precision
      recall: flow2_recall
      f1score: flow2_f1score

  ################# JOIN #################
  joined_predictions:
    type: JoinMaskedPredictions
    priority: 3.1
    # Names of used input streams.
    input_prediction_streams: [flow1_predictions, flow2_predictions]
    input_mask_streams: [flow1_masks, flow2_masks]
    input_word_mappings: [word_to_ix1, word_to_ix2]
    globals:
      output_word_mappings: label_word_mappings # from MNIST task.
    streams:
      output_strings: merged_predictions
      output_indices: merged_indices

  # Statistics.
  joined_accuracy:
    type: AccuracyStatistics
    priority: 3.21
    # Use prediction indices instead of distributions.
    use_prediction_distributions: False
    streams:
      targets: targets
      predictions: merged_indices
    statistics:
      accuracy: joined_accuracy

  joined_precision_recall:
    type: PrecisionRecallStatistics
    priority: 3.22
    # Use prediction indices instead of distributions.
    use_prediction_distributions: False
    use_word_mappings: True
    show_class_scores: True
    show_confusion_matrix: True
    globals:
      word_mappings: label_word_mappings # straight from MNIST
      #num_classes: num_classes
    streams:
      targets: targets # straight from MNIST
      predictions: merged_indices
    statistics:
      precision: joined_precision
      recall: joined_recall
      f1score: joined_f1score

  # "Fix" (overwrite) stream names in viewers.
  image_viewer:
    streams:
      answers: merged_predictions

  stream_viewer:
    input_streams: labels, merged_predictions

#: pipeline


================================================
FILE: configs/mnist/mnist_classification_vf_shared_convnet_2softmaxes_2losses.yml
================================================
# Load config defining MNIST tasks for training, validation and testing.
default_configs: mnist/default_mnist.yml

# Training parameters - overwrite defaults:
training:
  task: 
    #resize_image: [32, 32]
    batch_size: 64
  #optimizer:
  #  #type: Adam
  #  lr: 0.001
  #terminal_conditions:
  #  loss_stop_threshold: 0.08

# Validation parameters - overwrite defaults:
#validation:
#  partial_validation_interval: 10
#  task:
#    resize_image: [32, 32]

# Testing parameters - overwrite defaults:
#test:
#  task:
#    resize_image: [32, 32]

# Definition of the pipeline.
pipeline:

  # Disable components for "default" flow.
  disable: nllloss, accuracy, precision_recall, answer_decoder, image_viewer

  ################# SHARED #################

  # Add global variables.
  global_publisher:
    type: GlobalVariablePublisher
    priority: 0.1
    keys: [num_classes1, num_classes2, word_to_ix1, word_to_ix2]
    values: [3, 7, {"Three": 0, "One": 1, "Five": 2}, {"Four": 0, "Two": 1, "Zero": 2, "Six": 3, "Seven": 4, "Eight": 5, "Nine": 6}]
    #values: [3, 7, {"Zero": 0, "One": 1, "Two": 2}, {"Three": 0, "Four": 1, "Five": 2, "Six": 3, "Seven": 4, "Eight": 5, "Nine": 6}]

  # Shared model - encoder.
  image_encoder:
    type: ConvNetEncoder
    priority: 0.2

  # Reshape inputs
  reshaper:
    type: ReshapeTensor
    input_dims: [-1, 16, 1, 1]
    output_dims: [-1, 16]
    priority: 0.3
    streams:
      inputs: feature_maps
      outputs: reshaped_maps
    globals:
      output_size: reshaped_maps_size

  ################# Flow 1 #################
  # Classifier.
  flow1_classifier:
    type: FeedForwardNetwork 
    priority: 1.1
    globals:
      input_size: reshaped_maps_size
      prediction_size: num_classes1
    streams:
      inputs: reshaped_maps
      predictions: flow1_predictions
      
  flow1_label_to_mask1:
    type: StringToMask
    priority: 1.2
    globals:
      word_mappings: word_to_ix1
    streams:
      strings: labels
      masks: flow1_masks

  flow1_label_to_target1:
    type: LabelIndexer
    priority: 1.3
    import_word_mappings_from_globals: True
    globals:
      word_mappings: word_to_ix1
    streams:
      inputs: labels
      outputs: flow1_targets

  # Masked loss.
  flow1_nllloss:
    type: NLLLoss
    priority: 1.4
    use_masking: True
    streams:
      targets: flow1_targets
      predictions: flow1_predictions
      masks: flow1_masks
      loss: flow1_loss

  # Statistics.
  flow1_accuracy:
    type: AccuracyStatistics
    priority: 1.51
    use_masking: True
    streams:
      predictions: flow1_predictions
      targets: flow1_targets
      masks: flow1_masks
    statistics:
      accuracy: flow1_accuracy

  flow1_precision_recall:
    type: PrecisionRecallStatistics
    priority: 1.52
    use_word_mappings: True
    show_class_scores: True
    show_confusion_matrix: True
    use_masking: True
    globals:
      word_mappings: word_to_ix1
      num_classes: num_classes1
    streams:
      targets: flow1_targets
      predictions: flow1_predictions
      masks: flow1_masks
    statistics:
      precision: flow1_precision
      recall: flow1_recall
      f1score: flow1_f1score

  ################# Flow 2 #################
  # Classifier.
  flow2_classifier:
    type: FeedForwardNetwork 
    priority: 2.1
    globals:
      input_size: reshaped_maps_size
      prediction_size: num_classes2
    streams:
      inputs: reshaped_maps
      predictions: flow2_predictions
      
  flow2_label_to_mask2:
    type: StringToMask
    priority: 2.2
    globals:
      word_mappings: word_to_ix2
    streams:
      strings: labels
      masks: flow2_masks

  flow2_label_to_target2:
    type: LabelIndexer
    priority: 2.3
    import_word_mappings_from_globals: True
    globals:
      word_mappings: word_to_ix2
    streams:
      inputs: labels
      outputs: flow2_targets

  # Masked loss.
  flow2_nllloss:
    type: NLLLoss
    priority: 2.4
    use_masking: True
    streams:
      targets: flow2_targets
      predictions: flow2_predictions
      masks: flow2_masks
      loss: flow2_loss

  # Statistics.
  flow2_accuracy:
    type: AccuracyStatistics
    priority: 2.41
    use_masking: True
    streams:
      targets: flow2_targets
      predictions: flow2_predictions
      masks: flow2_masks
    statistics:
      accuracy: flow2_accuracy

  flow2_precision_recall:
    type: PrecisionRecallStatistics
    priority: 2.42
    use_word_mappings: True
    show_class_scores: True
    show_confusion_matrix: True
    use_masking: True
    globals:
      word_mappings: word_to_ix2
      num_classes: num_classes2
    streams:
      targets: flow2_targets
      predictions: flow2_predictions
      masks: flow2_masks
    statistics:
      precision: flow2_precision
      recall: flow2_recall
      f1score: flow2_f1score

  ################# JOIN #################
  joined_predictions:
    type: JoinMaskedPredictions
    priority: 3.1
    # Names of used input streams.
    input_prediction_streams: [flow1_predictions, flow2_predictions]
    input_mask_streams: [flow1_masks, flow2_masks]
    input_word_mappings: [word_to_ix1, word_to_ix2]
    globals:
      output_word_mappings: label_word_mappings # from MNIST task.
    streams:
      output_strings: merged_predictions
      output_indices: merged_indices

  # Statistics.
  joined_precision_recall:
    type: PrecisionRecallStatistics
    priority: 3.22
    # Use prediction indices instead of distributions.
    use_prediction_distributions: False
    use_word_mappings: True
    show_class_scores: True
    show_confusion_matrix: True
    globals:
      word_mappings: label_word_mappings # straight from MNIST
      #num_classes: num_classes
    streams:
      targets: targets # straight from MNIST
      predictions: merged_indices
    statistics:
      precision: joined_precision
      recall: joined_recall
      f1score: joined_f1score

  # "Fix" (overwrite) stream names in viewers.
  image_viewer:
    streams:
      answers: merged_predictions

  stream_viewer:
    input_streams: labels, merged_predictions

#: pipeline


================================================
FILE: configs/molecule_classification/default_molecule_classification.yml
================================================
# Training parameters:
training:
  task: 
    type: SimpleMolecules
    # Anchor &b: the same batch size is aliased (*b) by every other split below.
    batch_size: &b 64
    split: training
    resize_image: [87, 87]
  # Resizing slows down the batch generation, so let's use many dataloaders.
  dataloader:
    num_workers: 4

  optimizer:
    type: Adam
    lr: 0.0001
  # settings parameters
  terminal_conditions:
    loss_stop_threshold: 0.15
    # -1 disables early stopping on validation.
    early_stop_validations: -1
    episode_limit: 10000
    epoch_limit: 10

# Validation parameters:
validation:
  task:
    type: SimpleMolecules
    batch_size: *b
    split: validation
    resize_image: [87, 87]
  dataloader:
    num_workers: 4

# Testing parameters:
test:
  task:
    type: SimpleMolecules
    batch_size: *b
    split: test
    resize_image: [87, 87]
  dataloader:
    num_workers: 4

# Additional test splits exposed by the SimpleMolecules task.
test_mirror_blur:
  task:
    type: SimpleMolecules
    batch_size: *b
    split: test_mirror_blur
    resize_image: [87, 87]
  dataloader:
    num_workers: 4

test_on_grid:
  task:
    type: SimpleMolecules
    batch_size: *b
    split: test_on_grid
    resize_image: [87, 87]
  dataloader:
    num_workers: 4

test_handwritten:
  task:
    type: SimpleMolecules
    batch_size: *b
    split: test_handwritten
    resize_image: [87, 87]
  dataloader:
    num_workers: 4


# Default components shared between all molecule classification pipelines.
# Model components are expected to be added by the configs that include this file.
pipeline:
  #disable: image_viewer

  # Loss
  nllloss:
    type: NLLLoss
    priority: 4
    streams:
      targets: targets
      predictions: predictions

  accuracy:
    priority: 5
    type: AccuracyStatistics
    streams:
      targets: targets
      predictions: predictions

  # Turns predicted class indices back into label words.
  answer_decoder:
    priority: 6
    type: WordDecoder
    import_word_mappings_from_globals: True
    globals:
      word_mappings: label_word_mappings
    streams:
      inputs: predictions
      outputs: predicted_answers

  stream_viewer:
    priority: 7
    type: StreamViewer
    input_streams: labels, targets, predictions, predicted_answers

  image_viewer:
    priority: 100.5
    type: ImageViewer
    streams:
      images: images
      labels: labels
      answers: predicted_answers



================================================
FILE: configs/molecule_classification/molecule_classification_convnet_softmax.yml
================================================
# Load config defining molecule classification tasks for training, validation and testing.
default_configs: molecule_classification/default_molecule_classification.yml

# Pipeline - only the model-related components.
pipeline:
  # Model 1: 3 CNN layers.
  image_encoder:
    type: ConvNetEncoder
    priority: 1
    # Using default stream names, so the following could be removed (leaving it just for the clarity though).
    streams:
      inputs: images
      feature_maps: feature_maps

  # Reshape inputs
  reshaper:
    type: ReshapeTensor
    # TODO: change!
    #input_dims: [-1, 16, 107, 107]
    #output_dims: [-1, 183184]
    # NOTE(review): dims below presumably correspond to the 87x87 resize in the
    # default config — verify if resize_image is overridden.
    input_dims: [-1, 16, 9, 9]
    output_dims: [-1, 1296]
    priority: 2
    streams:
      inputs: feature_maps
      outputs: reshaped_maps
    globals:
      output_size: reshaped_maps_size

  # Model 2: 1 Fully connected layer with softmax activation.
  classifier:
    type: FeedForwardNetwork 
    priority: 3
    streams:
      inputs: reshaped_maps
      # Using default stream name, so the following could be removed (leaving it just for the clarity though).
      predictions: predictions
    globals:
      input_size: reshaped_maps_size
      prediction_size: num_classes

#: pipeline


================================================
FILE: configs/molecule_classification/molecule_classification_vgg16_molecules.yml
================================================
# Load config defining molecule classification tasks for training, validation and testing.
default_configs: molecule_classification/default_molecule_classification.yml

# Override some training parameters:
# VGG expects 224x224 RGB inputs, so all splits resize to that and use 3 channels.
training:
  task: 
    resize_image: [224, 224]
    image_depth: 3

# Override some validation parameters:
validation:
  task:
    resize_image: [224, 224]
    image_depth: 3

# Override some test parameters:
test:
  task:
    resize_image: [224, 224]
    image_depth: 3

test_mirror_blur:
  task:
    resize_image: [224, 224]
    image_depth: 3

test_on_grid:
  task:
    resize_image: [224, 224]
    image_depth: 3

test_handwritten:
  task:
    resize_image: [224, 224]
    image_depth: 3


# Pipeline - only the model-related components.
pipeline:

  # Model 1: VGG
  image_encoder:
    type: GenericImageEncoder
    # Train the encoder from scratch (no pretrained weights).
    pretrained: False
    priority: 1
    # Using default stream names, so the following could be removed (leaving it just for the clarity though).
    streams:
      inputs: images
      outputs: vgg_images
    globals:
      output_size: num_classes


  # Model 2: 1 Fully connected layer with softmax activation.
  classifier:
    type: FeedForwardNetwork 
    priority: 3
    streams:
      inputs: vgg_images
      # Using default stream name, so the following could be removed (leaving it just for the clarity though).
      predictions: predictions
    globals:
      input_size: num_classes
      prediction_size: num_classes

#: pipeline


================================================
FILE: configs/translation/eng_fra_translation_enc_attndec.yml
================================================
# This pipeline applies an encoder-decoder GRU with attention on the open Tatoeba translation sentence pairs. 
# Inspired by https://pytorch.org/tutorials/intermediate/seq2seq_translation_tutorial.html .
# Note that training will be slower than in the tutorial, as teacher forcing is not implemented here.

# Training parameters:
training:
  task:
    # Anchors (&p_type, &data_folder, &dataset) are aliased by validation/test below.
    type: &p_type TranslationPairs
    data_folder: &data_folder ~/data/language_modeling/translation_pairs
    dataset: &dataset eng-fra
    subset: train
    sentence_length: 10
    batch_size:  64

  # optimizer parameters:
  optimizer:
    type: Adam
    lr: 1.0e-3

  # settings parameters
  terminal_conditions:
    loss_stop_threshold: 1.0e-2
    episode_limit: 1000000
    epoch_limit: 100

# Validation parameters:
validation:
  partial_validation_interval: 100
  task:
    type: *p_type
    data_folder: *data_folder
    dataset: *dataset
    subset: valid
    sentence_length: 10
    batch_size:  64

# Testing parameters:
test:
  task:
    type: *p_type 
    data_folder: *data_folder
    dataset: *dataset
    subset: test
    sentence_length: 10
    batch_size: 64

pipeline:

  # Source encoding - model 1.
  source_sentence_embedding:
    type: SentenceEmbeddings
    priority: 1.1
    embeddings_size: 50
    # FIX: key renamed from 'pretrained_embeddings' to 'pretrained_embeddings_file',
    # matching the key used by all other SentenceEmbeddings configs in this repo
    # (e.g. the vqa_med_2019 pipelines); the old key was not recognized, so the
    # GloVe embeddings were silently never loaded.
    pretrained_embeddings_file: glove.6B.50d.txt
    data_folder: *data_folder
    source_vocabulary_files: eng-fra/eng.train.txt,eng-fra/eng.valid.txt,eng-fra/eng.test.txt
    vocabulary_mappings_file: eng-fra/eng.all.tokenized_words
    regenerate: True
    additional_tokens: <PAD>,<EOS>
    import_word_mappings_from_globals: False
    export_word_mappings_to_globals: False
    fixed_padding: 10
    streams:
      inputs: sources
      outputs: embedded_sources
        
  # Target encoding.
  target_indexer:
    type: SentenceIndexer
    priority: 2.1
    data_folder: *data_folder
    source_vocabulary_files: eng-fra/fra.train.txt,eng-fra/fra.valid.txt,eng-fra/fra.test.txt
    import_word_mappings_from_globals: False
    export_word_mappings_to_globals: True
    export_pad_mapping_to_globals: True
    eos_token: True
    fixed_padding: 10
    additional_tokens: <PAD>,<EOS>
    regenerate: True
    streams:
      inputs: targets
      outputs: indexed_targets
      pad_index: tgt_pad_index
  
  # Single layer GRU Encoder
  encoder:
    type: RecurrentNeuralNetwork
    cell_type: GRU
    priority: 3
    initial_state: Trainable
    hidden_size: 50
    num_layers: 1
    use_logsoftmax: False
    # Last hidden state is exported so the decoder can consume it.
    output_last_state: True
    prediction_mode: Dense
    ffn_output: False
    streams:
      inputs: embedded_sources
      predictions: s2s_encoder_output
      output_state: s2s_state_output
    globals:
      input_size: embeddings_size
      prediction_size: embeddings_size 

  # Single layer GRU Decoder with attention
  decoder:
    type: AttentionDecoder
    priority: 4
    hidden_size: 50
    use_logsoftmax: False
    autoregression_length: 10
    prediction_mode: Dense
    streams:
      inputs: s2s_encoder_output
      predictions: s2s_decoder_output
      input_state: s2s_state_output
    globals:
      input_size: embeddings_size
      prediction_size: embeddings_size 

  # FF, to resize from the output size of the seq2seq to the size of the target vocabulary.
  ff_resize_s2s_output:
    type: FeedForwardNetwork 
    use_logsoftmax: True
    dimensions: 3
    priority: 5
    streams:
      inputs: s2s_decoder_output
    globals:
      input_size: embeddings_size
      prediction_size: vocabulary_size

  # Loss
  nllloss:
    type: NLLLoss
    priority: 6
    num_targets_dims: 2
    streams:
      targets: indexed_targets
      loss: loss
    globals:
      # Padding positions are excluded from the loss.
      ignore_index: tgt_pad_index

  # Prediction decoding.
  prediction_decoder:
    type: SentenceIndexer
    priority: 10
    # Reverse mode.
    reverse: True
    # Use distributions as inputs.
    use_input_distributions: True
    data_folder: *data_folder
    import_word_mappings_from_globals: True
    streams:
      inputs: predictions
      outputs: prediction_sentences


  # Statistics.
  batch_size:
    type: BatchSizeStatistics
    priority: 100.0

  bleu:
    type: BLEUStatistics
    priority: 100.2
    ignored_words: ["<PAD>", "<EOS>"]
    streams:
      targets: indexed_targets

      
  # Viewers.
  viewer:
    type: StreamViewer
    priority: 100.3
    input_streams: sources,targets,indexed_targets,prediction_sentences

#: pipeline


================================================
FILE: configs/tutorials/mnist_classification_convnet_softmax.yml
================================================
# Training parameters:
training:
  task: 
    type: MNIST
    # Anchor &b: aliased (*b) by validation and test below.
    batch_size: &b 64
    use_train_data: True
  # Use sampler that operates on a subset.
  sampler:
    type: SubsetRandomSampler
    indices: [0, 55000]
  # optimizer parameters:
  optimizer:
    type: Adam
    lr: 0.0001
  # settings parameters
  terminal_conditions:
    loss_stop_threshold: 0.15
    # -1 disables early stopping on validation.
    early_stop_validations: -1
    episode_limit: 10000
    epoch_limit: 10

# Validation parameters:
validation:
  task:
    type: MNIST
    batch_size: *b
    use_train_data: True  # True because we are splitting the training set to: validation and training
  # Use sampler that operates on a subset.
  sampler:
    type: SubsetRandomSampler
    indices: [55000, 60000]

# Testing parameters:
test:
  task:
    type: MNIST
    batch_size: *b
    use_train_data: False # Test set.

pipeline:
  # Model 1: 3 CNN layers.
  image_encoder:
    type: ConvNetEncoder
    priority: 1
    # Using default stream names, so the following could be removed (leaving it just for the clarity though).
    streams:
      inputs: inputs
      feature_maps: feature_maps

  # Reshape inputs
  reshaper:
    type: ReshapeTensor
    # Flattens the final 16x1x1 feature maps into a 16-element vector.
    input_dims: [-1, 16, 1, 1]
    output_dims: [-1, 16]
    priority: 2
    streams:
      inputs: feature_maps
      outputs: reshaped_maps
    globals:
      output_size: reshaped_maps_size

  # Model 2: 1 Fully connected layer with softmax activation.
  classifier:
    type: FeedForwardNetwork 
    priority: 3
    streams:
      inputs: reshaped_maps
      # Using default stream name, so the following could be removed (leaving it just for the clarity though).
      predictions: predictions
    globals:
      input_size: reshaped_maps_size
      prediction_size: num_classes


  # Loss
  nllloss:
    type: NLLLoss
    priority: 4
    # Using default stream names, so the following could be removed (leaving it just for the clarity though).
    streams:
      targets: targets
      predictions: predictions

  accuracy:
    priority: 5
    type: AccuracyStatistics
    # Using default stream names, so the following could be removed (leaving it just for the clarity though).
    streams:
      targets: targets
      predictions: predictions

  # Turns predicted class indices back into label words.
  answer_decoder:
    priority: 6
    type: WordDecoder
    import_word_mappings_from_globals: True
    globals:
      word_mappings: label_word_mappings
    streams:
      inputs: predictions
      outputs: predicted_answers

  stream_viewer:
    priority: 7
    type: StreamViewer
    input_streams: labels, targets, predictions, predicted_answers


#: pipeline


================================================
FILE: configs/vqa_med_2019/c1_classification/c1_classification_all_bow_vgg16_concat.yml
================================================
# Load config defining tasks for training, validation and testing.
default_configs: vqa_med_2019/c1_classification/default_c1_classification.yml

pipeline:

  global_publisher:
    type: GlobalVariablePublisher
    priority: 0
    # Add input_size to globals.
    keys: [image_size_encoder_input_size, image_size_encoder_output_size, image_encoder_output_size]
    values: [2, 10, 100]

  # First subpipeline: question.
  # Questions encoding.
  question_tokenizer:
    type: SentenceTokenizer
    priority: 1.1
    streams: 
      inputs: questions
      outputs: tokenized_questions

  question_onehot_encoder:
    type: SentenceOneHotEncoder
    priority: 1.2
    data_folder: ~/data/vqa-med
    word_mappings_file: questions.all.word.mappings.csv
    export_word_mappings_to_globals: True
    streams:
      inputs: tokenized_questions
      outputs: encoded_questions
    globals:
      vocabulary_size: question_vocabulary_size

  question_bow_encoder:
    type: BOWEncoder
    priority: 1.3
    streams:
      inputs: encoded_questions
      outputs: question_activations
    globals:
        bow_size: question_vocabulary_size


  # 2nd subpipeline: image size.
  # Model - image size classifier.
  image_size_encoder:
    type: FeedForwardNetwork 
    priority: 2.1
    streams:
      inputs: image_sizes
      predictions: image_size_activations
    globals:
      input_size: image_size_encoder_input_size
      prediction_size: image_size_encoder_output_size

  # 3rd subpipeline: image.
  # Image encoder.
  image_encoder:
    type: GenericImageEncoder
    priority: 3.1
    streams:
      inputs: images
      outputs: image_activations
    globals:
      output_size: image_encoder_output_size
  
  # 4th subpipeline: concatenation + FF.
  concat:
    type: ConcatenateTensor
    priority: 4.1
    input_streams: [question_activations,image_size_activations,image_activations]
    # ConcatenateTensor 
    dim: 1 # default
    # NOTE(review): the hard-coded 99 assumes the question vocabulary has
    # exactly 99 entries — verify against questions.all.word.mappings.csv.
    input_dims: [[-1,99],[-1,10],[-1,100]]
    output_dims: [-1,209]
    streams:
      outputs: concatenated_activations
    globals:
      output_size: output_size


  classifier:
    type: FeedForwardNetwork 
    hidden_sizes: [100]
    priority: 4.2
    streams:
      inputs: concatenated_activations
    globals:
      input_size: output_size
      prediction_size: vocabulary_size_c1


  #: pipeline


================================================
FILE: configs/vqa_med_2019/c1_classification/c1_classification_all_rnn_vgg16_concat.yml
================================================
# Load config defining tasks for training, validation and testing.
default_configs: vqa_med_2019/c1_classification/default_c1_classification.yml

pipeline:

  global_publisher:
    type: GlobalVariablePublisher
    priority: 0
    # Add input_size to globals.
    keys: [question_embeddings_output_size, image_size_encoder_input_size, image_size_encoder_output_size, image_encoder_output_size]
    values: [100, 2, 10, 100]

  # First subpipeline: question.
  # Questions encoding.
  question_tokenizer:
    type: SentenceTokenizer
    priority: 1.1
    streams: 
      inputs: questions
      outputs: tokenized_questions

  # Model 1: Embeddings
  question_embeddings:
    type: SentenceEmbeddings
    priority: 1.2
    embeddings_size: 50
    pretrained_embeddings_file: glove.6B.50d.txt
    data_folder: ~/data/vqa-med
    word_mappings_file: questions.all.word.mappings.csv
    streams:
      inputs: tokenized_questions
      outputs: embedded_questions      
  
  # Model 2: RNN
  question_lstm:
    type: RecurrentNeuralNetwork
    cell_type: LSTM
    # Only the last hidden state is used as the question representation.
    prediction_mode: Last
    priority: 1.3
    use_logsoftmax: False
    initial_state: Zero
    #num_layers: 5
    hidden_size: 50
    streams:
      inputs: embedded_questions
      predictions: question_activations
    globals:
      input_size: embeddings_size
      prediction_size: question_embeddings_output_size

  # 2nd subpipeline: image size.
  # Model - image size classifier.
  image_size_encoder:
    type: FeedForwardNetwork 
    priority: 2.1
    streams:
      inputs: image_sizes
      predictions: image_size_activations
    globals:
      input_size: image_size_encoder_input_size
      prediction_size: image_size_encoder_output_size

  # 3rd subpipeline: image.
  # Image encoder.
  image_encoder:
    type: GenericImageEncoder
    priority: 3.1
    streams:
      inputs: images
      outputs: image_activations
    globals:
      output_size: image_encoder_output_size
  
  # 4th subpipeline: concatenation + FF.
  concat:
    type: ConcatenateTensor
    priority: 4.1
    input_streams: [question_activations,image_size_activations,image_activations]
    # ConcatenateTensor 
    dim: 1 # default
    # 100 (question) + 10 (image size) + 100 (image) = 210.
    input_dims: [[-1,100],[-1,10],[-1,100]]
    output_dims: [-1,210]
    streams:
      outputs: concatenated_activations
    globals:
      output_size: output_size


  classifier:
    type: FeedForwardNetwork 
    hidden_sizes: [100]
    priority: 4.2
    streams:
      inputs: concatenated_activations
    globals:
      input_size: output_size
      prediction_size: vocabulary_size_c1


  #: pipeline


================================================
FILE: configs/vqa_med_2019/c1_classification/c1_classification_image_cnn_softmax.yml
================================================
# Load config defining tasks for training, validation and testing.
default_configs: vqa_med_2019/c1_classification/default_c1_classification.yml

pipeline:

  # Image encoder.
  image_encoder:
    type: ConvNetEncoder
    priority: 1.1
    streams:
      inputs: images

  # Reshape inputs
  reshaper:
    type: ReshapeTensor
    # NOTE(review): dims assume 16 channels of 26x26 feature maps from the
    # encoder — verify against the task's image size.
    input_dims: [-1, 16, 26, 26]
    output_dims: [-1, 10816]
    priority: 1.2
    streams:
      inputs: feature_maps
      outputs: reshaped_maps
    globals:
      output_size: reshaped_maps_size

  # Model - softmax classifier.
  classifier:
    type: FeedForwardNetwork 
    hidden_sizes: [1000]
    priority: 3
    streams:
      inputs: reshaped_maps
    globals:
      input_size: reshaped_maps_size
      prediction_size: vocabulary_size_c1
  
#: pipeline


================================================
FILE: configs/vqa_med_2019/c1_classification/c1_classification_image_size_softmax.yml
================================================
# Load config defining tasks for training, validation and testing.
default_configs: vqa_med_2019/c1_classification/default_c1_classification.yml

pipeline:

  global_publisher:
    type: GlobalVariablePublisher
    priority: 1
    # Add input_size to globals, so classifier will use it.
    # 2 = (width, height) pair coming in on the image_sizes stream.
    keys: input_size
    values: 2

  # Model - image size classifier.
  classifier:
    type: FeedForwardNetwork 
    hidden_sizes: [100]
    priority: 3
    streams:
      inputs: image_sizes
    globals:
      prediction_size: vocabulary_size_c1
  
#: pipeline


================================================
FILE: configs/vqa_med_2019/c1_classification/c1_classification_question_mimic_rnn.yml
================================================
# Load config defining tasks for training, validation and testing.
default_configs: vqa_med_2019/c1_classification/default_c1_classification.yml

pipeline:
  
  # Questions encoding.
  question_tokenizer:
    type: SentenceTokenizer
    priority: 1.1
    streams: 
      inputs: questions
      outputs: tokenized_questions

  # Model 1: Embeddings
  question_embeddings:
    type: SentenceEmbeddings
    priority: 1.2
    embeddings_size: 300
    pretrained_embeddings_file: mimic.fastText.no_clean.300d.pickled
    data_folder: ~/data/vqa-med
    word_mappings_file: questions.all.word.mappings.csv
    streams:
      inputs: tokenized_questions
      outputs: embedded_questions      
  
  # Model 2: RNN
  
Download .txt
gitextract_zxl7rewp/

├── .coveralls.yml
├── .gitignore
├── .lgtm.yml
├── .travis.yml
├── LICENSE
├── README.md
├── configs/
│   ├── cifar100/
│   │   ├── cifar100_classification_convnet_softmax.yml
│   │   └── default_cifar100.yml
│   ├── clevr/
│   │   ├── clevr_all_vgg_glove_lstm_concat_ffn.yml
│   │   ├── clevr_image_convnet_ffn.yml
│   │   ├── clevr_question_glove_lstm.yml
│   │   └── default_clevr.yml
│   ├── default/
│   │   ├── components/
│   │   │   ├── language/
│   │   │   │   ├── bow_encoder.yml
│   │   │   │   ├── label_indexer.yml
│   │   │   │   ├── sentence_indexer.yml
│   │   │   │   ├── sentence_one_hot_encoder.yml
│   │   │   │   ├── sentence_tokenizer.yml
│   │   │   │   └── word_decoder.yml
│   │   │   ├── losses/
│   │   │   │   └── nll_loss.yml
│   │   │   ├── masking/
│   │   │   │   ├── join_masked_predictions.yml
│   │   │   │   └── string_to_mask.yml
│   │   │   ├── models/
│   │   │   │   ├── general_usage/
│   │   │   │   │   ├── attention_decoder.yml
│   │   │   │   │   ├── feed_forward_network.yml
│   │   │   │   │   ├── recurrent_neural_network.yml
│   │   │   │   │   └── seq2seq.yml
│   │   │   │   ├── language/
│   │   │   │   │   ├── index_embeddings.yml
│   │   │   │   │   └── sentence_embeddings.yml
│   │   │   │   ├── multi_modal_reasoning/
│   │   │   │   │   ├── compact_bilinear_pooling.yml
│   │   │   │   │   ├── factorized_bilinear_pooling.yml
│   │   │   │   │   ├── low_rank_bilinear_pooling.yml
│   │   │   │   │   ├── question_driven_attention.yml
│   │   │   │   │   ├── relational_network.yml
│   │   │   │   │   └── self_attention.yml
│   │   │   │   └── vision/
│   │   │   │       ├── convnet_encoder.yml
│   │   │   │       ├── generic_image_encoder.yml
│   │   │   │       └── lenet5.yml
│   │   │   ├── publishers/
│   │   │   │   ├── global_variable_publisher.yml
│   │   │   │   └── stream_file_exporter.yml
│   │   │   ├── statistics/
│   │   │   │   ├── accuracy_statistics.yml
│   │   │   │   ├── batch_size_statistics.yml
│   │   │   │   ├── bleu_statistics.yml
│   │   │   │   └── precision_recall_statistics.yml
│   │   │   ├── tasks/
│   │   │   │   ├── image_text_to_class/
│   │   │   │   │   ├── clevr.yml
│   │   │   │   │   ├── gqa.yml
│   │   │   │   │   └── vqa_med_2019.yml
│   │   │   │   ├── image_to_class/
│   │   │   │   │   ├── cifar_100.yml
│   │   │   │   │   ├── mnist.yml
│   │   │   │   │   └── simple_molecules.yml
│   │   │   │   ├── text_to_class/
│   │   │   │   │   ├── dummy_language_identification.yml
│   │   │   │   │   ├── wily_language_identification.yml
│   │   │   │   │   └── wily_ngram_language_modeling.yml
│   │   │   │   └── text_to_text/
│   │   │   │       ├── translation_pairs.yml
│   │   │   │       └── wikitext_language_modeling.yml
│   │   │   ├── transforms/
│   │   │   │   ├── concatenate_tensor.yml
│   │   │   │   ├── list_to_tensor.yml
│   │   │   │   ├── non_linearity.yml
│   │   │   │   ├── reduce_tensor.yml
│   │   │   │   └── reshape_tensor.yml
│   │   │   └── viewers/
│   │   │       ├── image_viewer.yml
│   │   │       └── stream_viewer.yml
│   │   └── workers/
│   │       ├── offline_trainer.yml
│   │       ├── online_trainer.yml
│   │       └── processor.yml
│   ├── mnist/
│   │   ├── default_mnist.yml
│   │   ├── mnist_classification_convnet_softmax.yml
│   │   ├── mnist_classification_kfold_softmax.yml
│   │   ├── mnist_classification_lenet5.yml
│   │   ├── mnist_classification_softmax.yml
│   │   ├── mnist_classification_vf_2lenet5_2losses.yml
│   │   └── mnist_classification_vf_shared_convnet_2softmaxes_2losses.yml
│   ├── molecule_classification/
│   │   ├── default_molecule_classification.yml
│   │   ├── molecule_classification_convnet_softmax.yml
│   │   └── molecule_classification_vgg16_molecules.yml
│   ├── translation/
│   │   └── eng_fra_translation_enc_attndec.yml
│   ├── tutorials/
│   │   └── mnist_classification_convnet_softmax.yml
│   ├── vqa_med_2019/
│   │   ├── c1_classification/
│   │   │   ├── c1_classification_all_bow_vgg16_concat.yml
│   │   │   ├── c1_classification_all_rnn_vgg16_concat.yml
│   │   │   ├── c1_classification_image_cnn_softmax.yml
│   │   │   ├── c1_classification_image_size_softmax.yml
│   │   │   ├── c1_classification_question_mimic_rnn.yml
│   │   │   ├── c1_classification_question_onehot_bow.yml
│   │   │   ├── c1_classification_question_rnn.yml
│   │   │   ├── c1_classification_vf_question_rnn_separate_q_categorization.yml
│   │   │   └── default_c1_classification.yml
│   │   ├── c2_classification/
│   │   │   ├── c2_class_lstm_resnet152_ewm_cat_is.yml
│   │   │   ├── c2_class_lstm_resnet152_rn_cat_is.yml
│   │   │   ├── c2_class_lstm_resnet50_attn_cat_is.yml
│   │   │   ├── c2_class_lstm_resnet50_coattn_mfb_cat_is.yml
│   │   │   ├── c2_class_lstm_resnet50_ewm_cat_is.yml
│   │   │   ├── c2_class_lstm_resnet50_mfb_cat_is.yml
│   │   │   ├── c2_class_lstm_resnet50_rn_cat_is.yml
│   │   │   ├── c2_class_lstm_selfattn.yml
│   │   │   ├── c2_class_lstm_vgg16_rn.yml
│   │   │   ├── c2_class_lstm_vgg16_rn_cat_is.yml
│   │   │   ├── c2_classification_all_rnn_vgg16_concat.yml
│   │   │   ├── c2_classification_all_rnn_vgg16_ewm.yml
│   │   │   ├── c2_classification_all_rnn_vgg16_ewm_size.yml
│   │   │   ├── c2_classification_all_rnn_vgg16_mcb.yml
│   │   │   ├── c2_word_answer_onehot_bow.yml
│   │   │   └── default_c2_classification.yml
│   │   ├── c3_classification/
│   │   │   ├── c3_classification_all_bow_vgg16_concat.yml
│   │   │   ├── c3_classification_all_concat.yml
│   │   │   ├── c3_classification_all_rnn_vgg16_concat.yml
│   │   │   ├── c3_classification_image_cnn_softmax.yml
│   │   │   ├── c3_classification_image_plus_size_concat.yml
│   │   │   ├── c3_classification_image_size_softmax.yml
│   │   │   ├── c3_classification_image_softmax.yml
│   │   │   ├── c3_classification_image_vgg16_softmax.yml
│   │   │   ├── c3_classification_question_onehot_bow.yml
│   │   │   ├── c3_classification_question_rnn.yml
│   │   │   └── default_c3_classification.yml
│   │   ├── c4_classification/
│   │   │   ├── c4_classification_all_rnn_vgg16_ewm_size.yml
│   │   │   ├── c4_enc_attndec.yml
│   │   │   ├── c4_enc_attndec_resnet152_ewm_cat_is.yml
│   │   │   ├── c4_frozen_if_gru_dec.yml
│   │   │   ├── c4_word_answer_glove_sum.yml
│   │   │   ├── c4_word_answer_mimic_sum.yml
│   │   │   ├── c4_word_answer_onehot_bow.yml
│   │   │   ├── c4_word_answer_onehot_sum.yml
│   │   │   └── default_c4_classification.yml
│   │   ├── default_vqa_med_2019.yml
│   │   ├── evaluation/
│   │   │   ├── deepta/
│   │   │   │   ├── glove_gru_resnet50_coattn_mfb_is_cat_ffn_c123_loss.yml
│   │   │   │   └── glove_gru_vgg16_coattn_mfb_is_cat_ffn_c1234_loss.yml
│   │   │   ├── example_mimic_lstm_vgg16_ewm_is_cat_ffn_c123_loss.yml
│   │   │   ├── frozen_if_ffn_c1234_loss.yml
│   │   │   ├── frozen_if_ffn_c123_loss.yml
│   │   │   ├── frozen_if_vf_5ffn_c1234yn_5losses.yml
│   │   │   ├── frozen_if_vf_5ffn_support_c1234yn_5losses.yml
│   │   │   └── tom/
│   │   │       ├── glove_lstm_resnet152_att_is_cat_ffn_c123_loss.yml
│   │   │       ├── glove_lstm_resnet152_mcb_is_cat_ffn_c123_loss.yml
│   │   │       ├── glove_lstm_vgg16_att_is_cat_ffn_c123_loss.yml
│   │   │       ├── glove_lstm_vgg16_ewm_is_cat_ffn_c123_loss.yml
│   │   │       └── glove_lstm_vgg16_mcb_is_cat_ffn_c123_loss.yml
│   │   ├── extend_answers.yml
│   │   ├── extend_answers_c4.yml
│   │   ├── frozen_pipelines/
│   │   │   ├── frozen_input_fusion_glove_lstm_vgg_att_is_cat.yml
│   │   │   ├── frozen_question_categorization_glove_rnn_ffn.yml
│   │   │   ├── frozen_word_answer_glove_sum.yml
│   │   │   └── input_fusion_processor_io.yml
│   │   ├── question_categorization/
│   │   │   ├── default_question_categorization.yml
│   │   │   ├── question_categorization_onehot_bow.yml
│   │   │   ├── question_categorization_onehot_rnn.yml
│   │   │   ├── question_categorization_rnn.yml
│   │   │   └── question_categorization_rnn_ffn.yml
│   │   └── vf/
│   │       ├── c1_binary_vf_cat_hard_shared_question_rnn_two_ffns_losses.yml
│   │       ├── c1_binary_vf_cat_rnn_shared_all_encoders_two_ffns_losses.yml
│   │       ├── c1_binary_vf_cat_rnn_shared_question_rnn_two_ffns_losses.yml
│   │       ├── c1_c2_c3_binary_vf_cat_rnn_shared_all_encoders_four_ffns_losses.yml
│   │       ├── c1_c3_binary_vf_cat_rnn_shared_all_encoders_three_ffns_losses.yml
│   │       ├── lstm_resnet152_is_cat_ffn_c123_no_binary_loss.yml
│   │       ├── lstm_resnet50_ewm_is_cat_ffn_c123_loss_ffn_yn_loss.yml
│   │       ├── lstm_resnet50_ewm_is_cat_ffn_c123_no_binary_loss.yml
│   │       ├── lstm_resnet50_is_cat_ffn_c123_no_binary_loss.yml
│   │       ├── lstm_vgg16_is_cat_ffn_c123_binary_yn_loss.yml
│   │       ├── lstm_vgg16_is_cat_ffn_c123_no_yn_loss.yml
│   │       └── lstm_vgg16_is_cat_ffn_only_yn_loss.yml
│   ├── wikitext/
│   │   ├── wikitext_language_modeling_encoder_attndecoder.yml
│   │   ├── wikitext_language_modeling_rnn.yml
│   │   ├── wikitext_language_modeling_seq2seq.yml
│   │   └── wikitext_language_modeling_seq2seq_simple.yml
│   └── wily/
│       ├── dummy_language_identification_bow.yml
│       ├── wily_language_identification_bow.yml
│       └── wily_ngram_language_modeling.yml
├── ptp/
│   ├── __init__.py
│   ├── application/
│   │   ├── __init__.py
│   │   ├── component_factory.py
│   │   ├── pipeline_manager.py
│   │   ├── sampler_factory.py
│   │   └── task_manager.py
│   ├── components/
│   │   ├── component.py
│   │   ├── language/
│   │   │   ├── __init__.py
│   │   │   ├── bow_encoder.py
│   │   │   ├── label_indexer.py
│   │   │   ├── sentence_indexer.py
│   │   │   ├── sentence_one_hot_encoder.py
│   │   │   ├── sentence_tokenizer.py
│   │   │   └── word_decoder.py
│   │   ├── losses/
│   │   │   ├── __init__.py
│   │   │   ├── loss.py
│   │   │   └── nll_loss.py
│   │   ├── masking/
│   │   │   ├── __init__.py
│   │   │   ├── join_masked_predictions.py
│   │   │   └── string_to_mask.py
│   │   ├── mixins/
│   │   │   ├── embeddings.py
│   │   │   ├── io.py
│   │   │   └── word_mappings.py
│   │   ├── models/
│   │   │   ├── __init__.py
│   │   │   ├── general_usage/
│   │   │   │   ├── attention_decoder.py
│   │   │   │   ├── feed_forward_network.py
│   │   │   │   ├── recurrent_neural_network.py
│   │   │   │   └── seq2seq.py
│   │   │   ├── language/
│   │   │   │   ├── index_embeddings.py
│   │   │   │   └── sentence_embeddings.py
│   │   │   ├── model.py
│   │   │   ├── multi_modal_reasoning/
│   │   │   │   ├── compact_bilinear_pooling.py
│   │   │   │   ├── factorized_bilinear_pooling.py
│   │   │   │   ├── low_rank_bilinear_pooling.py
│   │   │   │   ├── question_driven_attention.py
│   │   │   │   ├── relational_network.py
│   │   │   │   └── self_attention.py
│   │   │   └── vision/
│   │   │       ├── convnet_encoder.py
│   │   │       ├── generic_image_encoder.py
│   │   │       └── lenet5.py
│   │   ├── publishers/
│   │   │   ├── __init__.py
│   │   │   ├── global_variable_publisher.py
│   │   │   └── stream_file_exporter.py
│   │   ├── statistics/
│   │   │   ├── __init__.py
│   │   │   ├── accuracy_statistics.py
│   │   │   ├── batch_size_statistics.py
│   │   │   ├── bleu_statistics.py
│   │   │   └── precision_recall_statistics.py
│   │   ├── tasks/
│   │   │   ├── image_text_to_class/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── clevr.py
│   │   │   │   ├── gqa.py
│   │   │   │   └── vqa_med_2019.py
│   │   │   ├── image_to_class/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── cifar_100.py
│   │   │   │   ├── mnist.py
│   │   │   │   └── simple_molecules.py
│   │   │   ├── task.py
│   │   │   ├── text_to_class/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── dummy_language_identification.py
│   │   │   │   ├── language_identification.py
│   │   │   │   ├── wily_language_identification.py
│   │   │   │   └── wily_ngram_language_modeling.py
│   │   │   └── text_to_text/
│   │   │       ├── __init__.py
│   │   │       ├── translation_pairs.py
│   │   │       └── wikitext_language_modeling.py
│   │   ├── transforms/
│   │   │   ├── __init__.py
│   │   │   ├── concatenate_tensor.py
│   │   │   ├── list_to_tensor.py
│   │   │   ├── reduce_tensor.py
│   │   │   └── reshape_tensor.py
│   │   └── viewers/
│   │       ├── __init__.py
│   │       ├── image_viewer.py
│   │       └── stream_viewer.py
│   ├── configuration/
│   │   ├── __init__.py
│   │   ├── config_interface.py
│   │   ├── config_parsing.py
│   │   ├── config_registry.py
│   │   └── configuration_error.py
│   ├── data_types/
│   │   ├── __init__.py
│   │   ├── data_definition.py
│   │   └── data_streams.py
│   ├── utils/
│   │   ├── __init__.py
│   │   ├── app_state.py
│   │   ├── data_streams_parallel.py
│   │   ├── globals_facade.py
│   │   ├── key_mappings_facade.py
│   │   ├── logger.py
│   │   ├── samplers.py
│   │   ├── singleton.py
│   │   ├── statistics_aggregator.py
│   │   ├── statistics_collector.py
│   │   └── termination_condition.py
│   └── workers/
│       ├── __init__.py
│       ├── offline_trainer.py
│       ├── online_trainer.py
│       ├── processor.py
│       ├── test_data_dict_parallel.py
│       ├── trainer.py
│       └── worker.py
├── setup.py
└── tests/
    ├── __init__.py
    ├── application/
    │   ├── pipeline_tests.py
    │   ├── sampler_factory_tests.py
    │   └── samplers_tests.py
    ├── components/
    │   ├── component_tests.py
    │   └── tasks/
    │       ├── clevr_tests.py
    │       ├── gqa_tests.py
    │       └── task_tests.py
    ├── configuration/
    │   ├── config_interface_tests.py
    │   ├── config_registry_tests.py
    │   └── handshaking_tests.py
    ├── data_types/
    │   ├── data_definition_tests.py
    │   └── data_streams_tests.py
    └── utils/
        ├── app_state_tests.py
        └── statistics_tests.py
Download .txt
SYMBOL INDEX (655 symbols across 95 files)

FILE: ptp/application/component_factory.py
  class ComponentFactory (line 26) | class ComponentFactory(object):
    method check_inheritance (line 32) | def check_inheritance(class_obj, parent_class_name):
    method build (line 45) | def build(name, config):

FILE: ptp/application/pipeline_manager.py
  class PipelineManager (line 37) | class PipelineManager(object):
    method __init__ (line 42) | def __init__(self, name, config):
    method build (line 73) | def build(self, use_logger=True):
    method save (line 200) | def save(self, chkpt_dir, training_status, loss):
    method load (line 270) | def load(self, checkpoint_file):
    method load_models (line 308) | def load_models(self):
    method freeze_models (line 381) | def freeze_models(self):
    method __getitem__ (line 402) | def __getitem__(self, number):
    method __len__ (line 415) | def __len__(self):
    method summarize_all_components_header (line 425) | def summarize_all_components_header(self):
    method summarize_all_components (line 443) | def summarize_all_components(self):
    method summarize_models_header (line 460) | def summarize_models_header(self):
    method summarize_models (line 476) | def summarize_models(self):
    method handshake (line 488) | def handshake(self, data_streams, log=True):
    method forward (line 520) | def forward(self, data_streams):
    method eval (line 546) | def eval(self):
    method train (line 554) | def train(self):
    method cuda (line 562) | def cuda(self):
    method zero_grad (line 589) | def zero_grad(self):
    method backward (line 597) | def backward(self, data_streams):
    method return_loss_on_batch (line 623) | def return_loss_on_batch(self, stat_col):
    method return_loss_on_set (line 634) | def return_loss_on_set(self, stat_agg):
    method parameters (line 646) | def parameters(self, recurse=True):
    method named_parameters (line 668) | def named_parameters(self, recurse=True):
    method add_statistics (line 677) | def add_statistics(self, stat_col):
    method collect_statistics (line 703) | def collect_statistics(self, stat_col, data_streams):
    method add_aggregators (line 726) | def add_aggregators(self, stat_agg):
    method aggregate_statistics (line 745) | def aggregate_statistics(self, stat_col, stat_agg):

FILE: ptp/application/sampler_factory.py
  class SamplerFactory (line 30) | class SamplerFactory(object):
    method build (line 37) | def build(task, config, task_subset_name):

FILE: ptp/application/task_manager.py
  class TaskManager (line 34) | class TaskManager(object):
    method __init__ (line 39) | def __init__(self, name, config):
    method worker_init_fn (line 62) | def worker_init_fn(self, worker_id):
    method build (line 85) | def build(self, log=True):
    method __len__ (line 149) | def __len__(self):
    method get_epoch_size (line 164) | def get_epoch_size(self):
    method initialize_epoch (line 194) | def initialize_epoch(self):
    method finalize_epoch (line 211) | def finalize_epoch(self):

FILE: ptp/components/component.py
  class Component (line 30) | class Component(abc.ABC):
    method __init__ (line 31) | def __init__(self, name, class_type, config):
    method summarize_io (line 88) | def summarize_io(self, priority = -1):
    method input_data_definitions (line 111) | def input_data_definitions(self):
    method output_data_definitions (line 121) | def output_data_definitions(self):
    method handshake_input_definitions (line 130) | def handshake_input_definitions(self, all_definitions, log_errors=True):
    method export_output_definitions (line 178) | def export_output_definitions(self, all_definitions, log_errors=True):
    method __call__ (line 203) | def __call__(self, data_streams):
    method add_statistics (line 213) | def add_statistics(self, stat_col):
    method collect_statistics (line 227) | def collect_statistics(self, stat_col, data_streams):
    method add_aggregators (line 246) | def add_aggregators(self, stat_agg):
    method aggregate_statistics (line 260) | def aggregate_statistics(self, stat_col, stat_agg):

FILE: ptp/components/language/bow_encoder.py
  class BOWEncoder (line 23) | class BOWEncoder(Component):
    method __init__ (line 30) | def  __init__(self, name, config):
    method input_data_definitions (line 51) | def input_data_definitions(self):
    method output_data_definitions (line 61) | def output_data_definitions(self):
    method __call__ (line 71) | def __call__(self, data_streams):
    method encode_sample (line 95) | def encode_sample(self, list_of_tokens):

FILE: ptp/components/language/label_indexer.py
  class LabelIndexer (line 24) | class LabelIndexer(Component, WordMappings):
    method __init__ (line 28) | def __init__(self, name, config):
    method input_data_definitions (line 51) | def input_data_definitions(self):
    method output_data_definitions (line 61) | def output_data_definitions(self):
    method __call__ (line 71) | def __call__(self, data_streams):

FILE: ptp/components/language/sentence_indexer.py
  class SentenceIndexer (line 24) | class SentenceIndexer(Component, WordMappings):
    method __init__ (line 31) | def __init__(self, name, config):
    method input_data_definitions (line 68) | def input_data_definitions(self):
    method output_data_definitions (line 88) | def output_data_definitions(self):
    method __call__ (line 104) | def __call__(self, data_streams):
    method sentences_to_tensor (line 123) | def sentences_to_tensor(self, data_streams):
    method tensor_indices_to_sentences (line 166) | def tensor_indices_to_sentences(self, data_streams):
    method tensor_distributions_to_sentences (line 198) | def tensor_distributions_to_sentences(self, data_streams):

FILE: ptp/components/language/sentence_one_hot_encoder.py
  class SentenceOneHotEncoder (line 24) | class SentenceOneHotEncoder(Component, WordMappings):
    method __init__ (line 28) | def __init__(self, name, config):
    method input_data_definitions (line 48) | def input_data_definitions(self):
    method output_data_definitions (line 58) | def output_data_definitions(self):
    method __call__ (line 68) | def __call__(self, data_streams):

FILE: ptp/components/language/sentence_tokenizer.py
  class SentenceTokenizer (line 27) | class SentenceTokenizer(Component):
    method __init__ (line 31) | def __init__(self, name, config):
    method input_data_definitions (line 81) | def input_data_definitions(self):
    method output_data_definitions (line 93) | def output_data_definitions(self):
    method tokenize_sample (line 105) | def tokenize_sample(self, text):
    method detokenize_sample (line 130) | def detokenize_sample(self, sample):
    method __call__ (line 140) | def __call__(self, data_streams):

FILE: ptp/components/language/word_decoder.py
  class WordDecoder (line 24) | class WordDecoder(Component, WordMappings):
    method __init__ (line 28) | def __init__(self, name, config):
    method input_data_definitions (line 51) | def input_data_definitions(self):
    method output_data_definitions (line 61) | def output_data_definitions(self):
    method __call__ (line 71) | def __call__(self, data_streams):

FILE: ptp/components/losses/loss.py
  class Loss (line 24) | class Loss(Component):
    method __init__ (line 32) | def __init__(self, name, class_type, config):
    method loss_keys (line 53) | def loss_keys(self):
    method add_statistics (line 63) | def add_statistics(self, stat_col):
    method collect_statistics (line 73) | def collect_statistics(self, stat_col, data_streams):
    method add_aggregators (line 82) | def add_aggregators(self, stat_agg):
    method aggregate_statistics (line 96) | def aggregate_statistics(self, stat_col, stat_agg):

FILE: ptp/components/losses/nll_loss.py
  class NLLLoss (line 24) | class NLLLoss(Loss):
    method __init__ (line 29) | def __init__(self, name, config):
    method input_data_definitions (line 59) | def input_data_definitions(self):
    method output_data_definitions (line 73) | def output_data_definitions(self):
    method __call__ (line 84) | def __call__(self, data_streams):

FILE: ptp/components/masking/join_masked_predictions.py
  class JoinMaskedPredictions (line 26) | class JoinMaskedPredictions(Component):
    method __init__ (line 36) | def __init__(self, name, config):
    method input_data_definitions (line 85) | def input_data_definitions(self):
    method output_data_definitions (line 103) | def output_data_definitions(self):
    method __call__ (line 115) | def __call__(self, data_streams):

FILE: ptp/components/masking/string_to_mask.py
  class StringToMask (line 25) | class StringToMask(Component):
    method __init__ (line 30) | def __init__(self, name, config):
    method input_data_definitions (line 51) | def input_data_definitions(self):
    method output_data_definitions (line 61) | def output_data_definitions(self):
    method __call__ (line 72) | def __call__(self, data_streams):

FILE: ptp/components/mixins/embeddings.py
  function load_pretrained_embeddings (line 29) | def load_pretrained_embeddings(logger, folder, embeddings_name, word_to_...

FILE: ptp/components/mixins/io.py
  function load_pickle (line 28) | def load_pickle(logger, filename, encoding="ASCII"):
  function save_nparray_to_csv_file (line 48) | def save_nparray_to_csv_file(folder, filename, nparray, sep=','):
  function load_nparray_from_csv_file (line 72) | def load_nparray_from_csv_file(folder, filename, dtype=float, sep=','):
  function save_string_list_to_txt_file (line 97) | def save_string_list_to_txt_file(folder, filename, data):
  function load_string_list_from_txt_file (line 117) | def load_string_list_from_txt_file(folder, filename):
  function get_project_root (line 133) | def get_project_root() -> Path:
  function check_file_existence (line 140) | def check_file_existence(folder, filename):
  function check_files_existence (line 157) | def check_files_existence(folder, filenames):
  function download (line 184) | def download(folder, filename, url):
  function reporthook (line 222) | def reporthook(count, block_size, total_size):
  function download_extract_zip_file (line 238) | def download_extract_zip_file(logger, folder, url, zipfile_name):
  function move_files_between_dirs (line 266) | def move_files_between_dirs(logger, source_folder, dest_folder, filenames):

FILE: ptp/components/mixins/word_mappings.py
  class WordMappings (line 20) | class WordMappings(object):
    method __init__ (line 28) | def __init__(self): #, name, class_type, config):
  function load_word_mappings_from_csv_file (line 91) | def load_word_mappings_from_csv_file(logger, folder, filename):
  function save_word_mappings_to_csv_file (line 130) | def save_word_mappings_to_csv_file(logger, folder, filename, word_to_ix,...
  function pad_trunc_list (line 163) | def pad_trunc_list(l: list, length: int, padding_value = 0, eos_value = ...

FILE: ptp/components/models/general_usage/attention_decoder.py
  class AttentionDecoder (line 24) | class AttentionDecoder(Model):
    method __init__ (line 33) | def __init__(self, name, config):
    method activation2output (line 121) | def activation2output(self, activations):
    method input_data_definitions (line 141) | def input_data_definitions(self):
    method output_data_definitions (line 156) | def output_data_definitions(self):
    method forward (line 176) | def forward(self, data_streams):

FILE: ptp/components/models/general_usage/feed_forward_network.py
  class FeedForwardNetwork (line 24) | class FeedForwardNetwork(Model):
    method __init__ (line 29) | def __init__(self, name, config):
    method input_data_definitions (line 106) | def input_data_definitions(self):
    method output_data_definitions (line 117) | def output_data_definitions(self):
    method forward (line 127) | def forward(self, data_streams):

FILE: ptp/components/models/general_usage/recurrent_neural_network.py
  class RecurrentNeuralNetwork (line 24) | class RecurrentNeuralNetwork(Model):
    method __init__ (line 28) | def __init__(self, name, config):
    method initialize_hiddens_state (line 177) | def initialize_hiddens_state(self, batch_size):
    method activation_to_output_pass (line 190) | def activation_to_output_pass(self, activations):
    method input_data_definitions (line 214) | def input_data_definitions(self):
    method output_data_definitions (line 237) | def output_data_definitions(self):
    method forward (line 262) | def forward(self, data_streams):

FILE: ptp/components/models/general_usage/seq2seq.py
  class Seq2Seq (line 24) | class Seq2Seq(Model):
    method __init__ (line 28) | def __init__(self, name, config):
    method initialize_hiddens_state (line 129) | def initialize_hiddens_state(self, batch_size):
    method input_data_definitions (line 141) | def input_data_definitions(self):
    method output_data_definitions (line 153) | def output_data_definitions(self):
    method forward (line 165) | def forward(self, data_streams):

FILE: ptp/components/models/language/index_embeddings.py
  class IndexEmbeddings (line 27) | class IndexEmbeddings(Model):
    method __init__ (line 33) | def __init__(self, name, config):
    method input_data_definitions (line 62) | def input_data_definitions(self):
    method output_data_definitions (line 73) | def output_data_definitions(self):
    method forward (line 84) | def forward(self, data_streams):

FILE: ptp/components/models/language/sentence_embeddings.py
  class SentenceEmbeddings (line 30) | class SentenceEmbeddings(Model, WordMappings):
    method __init__ (line 42) | def __init__(self, name, config):
    method input_data_definitions (line 80) | def input_data_definitions(self):
    method output_data_definitions (line 91) | def output_data_definitions(self):
    method forward (line 102) | def forward(self, data_streams):

FILE: ptp/components/models/model.py
  class Model (line 26) | class Model(Module, Component):
    method __init__ (line 38) | def __init__(self, name, class_type, config):
    method save_to_checkpoint (line 67) | def save_to_checkpoint(self, chkpt):
    method load_from_checkpoint (line 76) | def load_from_checkpoint(self, chkpt, section=None):
    method freeze (line 88) | def freeze(self):
    method summarize (line 98) | def summarize(self):
    method recursive_summarize (line 122) | def recursive_summarize(self, module_, indent_, module_name_):

FILE: ptp/components/models/multi_modal_reasoning/compact_bilinear_pooling.py
  class CompactBilinearPooling (line 28) | class CompactBilinearPooling(Model):
    method __init__ (line 41) | def __init__(self, name, config):
    method generate_count_sketch_projection_matrix (line 73) | def generate_count_sketch_projection_matrix(self, input_size, output_s...
    method input_data_definitions (line 105) | def input_data_definitions(self):
    method output_data_definitions (line 117) | def output_data_definitions(self):
    method forward (line 127) | def forward(self, data_streams):

FILE: ptp/components/models/multi_modal_reasoning/factorized_bilinear_pooling.py
  class FactorizedBilinearPooling (line 27) | class FactorizedBilinearPooling(Model):
    method __init__ (line 35) | def __init__(self, name, config):
    method input_data_definitions (line 79) | def input_data_definitions(self):
    method output_data_definitions (line 91) | def output_data_definitions(self):
    method forward (line 101) | def forward(self, data_streams):

FILE: ptp/components/models/multi_modal_reasoning/low_rank_bilinear_pooling.py
  class LowRankBilinearPooling (line 27) | class LowRankBilinearPooling(Model):
    method __init__ (line 34) | def __init__(self, name, config):
    method input_data_definitions (line 72) | def input_data_definitions(self):
    method output_data_definitions (line 84) | def output_data_definitions(self):
    method forward (line 94) | def forward(self, data_streams):

FILE: ptp/components/models/multi_modal_reasoning/question_driven_attention.py
  class QuestionDrivenAttention (line 27) | class QuestionDrivenAttention(Model):
    method __init__ (line 36) | def __init__(self, name, config):
    method input_data_definitions (line 94) | def input_data_definitions(self):
    method output_data_definitions (line 106) | def output_data_definitions(self):
    method forward (line 116) | def forward(self, data_streams):
  function tile_2d_over_nd (line 160) | def tile_2d_over_nd(feature_vector, feature_map):
  function apply_attention (line 170) | def apply_attention(input, attention):

FILE: ptp/components/models/multi_modal_reasoning/relational_network.py
  class RelationalNetwork (line 28) | class RelationalNetwork(Model):
    method __init__ (line 37) | def __init__(self, name, config):
    method input_data_definitions (line 106) | def input_data_definitions(self):
    method output_data_definitions (line 118) | def output_data_definitions(self):
    method forward (line 128) | def forward(self, data_streams):

FILE: ptp/components/models/multi_modal_reasoning/self_attention.py
  class SelfAttention (line 27) | class SelfAttention(Model):
    method __init__ (line 37) | def __init__(self, name, config):
    method input_data_definitions (line 70) | def input_data_definitions(self):
    method output_data_definitions (line 81) | def output_data_definitions(self):
    method forward (line 91) | def forward(self, data_streams):

FILE: ptp/components/models/vision/convnet_encoder.py
  class ConvNetEncoder (line 28) | class ConvNetEncoder(Model):
    method __init__ (line 34) | def __init__(self, name, config):
    method input_data_definitions (line 209) | def input_data_definitions(self):
    method output_data_definitions (line 220) | def output_data_definitions(self):
    method forward (line 230) | def forward(self, data_streams):

FILE: ptp/components/models/vision/generic_image_encoder.py
  class GenericImageEncoder (line 30) | class GenericImageEncoder(Model):
    method __init__ (line 34) | def __init__(self, name, config):
    method input_data_definitions (line 142) | def input_data_definitions(self):
    method output_data_definitions (line 153) | def output_data_definitions(self):
    method forward (line 168) | def forward(self, data_streams):

FILE: ptp/components/models/vision/lenet5.py
  class LeNet5 (line 27) | class LeNet5(Model):
    method __init__ (line 31) | def __init__(self, name, config):
    method input_data_definitions (line 59) | def input_data_definitions(self):
    method output_data_definitions (line 70) | def output_data_definitions(self):
    method forward (line 80) | def forward(self, data_streams):

FILE: ptp/components/publishers/global_variable_publisher.py
  class GlobalVariablePublisher (line 22) | class GlobalVariablePublisher(Component):
    method __init__ (line 28) | def __init__(self, name, config):
    method input_data_definitions (line 65) | def input_data_definitions(self):
    method output_data_definitions (line 73) | def output_data_definitions(self):
    method __call__ (line 82) | def __call__(self, data_streams):

FILE: ptp/components/publishers/stream_file_exporter.py
  class StreamFileExporter (line 26) | class StreamFileExporter(Component):
    method __init__ (line 31) | def __init__(self, name, config):
    method input_data_definitions (line 70) | def input_data_definitions(self):
    method output_data_definitions (line 80) | def output_data_definitions(self):
    method __call__ (line 89) | def __call__(self, data_streams):

FILE: ptp/components/statistics/accuracy_statistics.py
  class AccuracyStatistics (line 27) | class AccuracyStatistics(Component):
    method __init__ (line 33) | def __init__(self, name, config):
    method input_data_definitions (line 62) | def input_data_definitions(self):
    method output_data_definitions (line 82) | def output_data_definitions(self):
    method __call__ (line 91) | def __call__(self, data_streams):
    method calculate_accuracy (line 98) | def calculate_accuracy(self, data_streams):
    method add_statistics (line 146) | def add_statistics(self, stat_col):
    method collect_statistics (line 156) | def collect_statistics(self, stat_col, data_streams):
    method add_aggregators (line 168) | def add_aggregators(self, stat_agg):
    method aggregate_statistics (line 181) | def aggregate_statistics(self, stat_col, stat_agg):

FILE: ptp/components/statistics/batch_size_statistics.py
  class BatchSizeStatistics (line 23) | class BatchSizeStatistics(Component):
    method __init__ (line 29) | def __init__(self, name, config):
    method input_data_definitions (line 46) | def input_data_definitions(self):
    method output_data_definitions (line 56) | def output_data_definitions(self):
    method __call__ (line 65) | def __call__(self, data_streams):
    method add_statistics (line 71) | def add_statistics(self, stat_col):
    method collect_statistics (line 80) | def collect_statistics(self, stat_col, data_streams):
    method add_aggregators (line 89) | def add_aggregators(self, stat_agg):
    method aggregate_statistics (line 99) | def aggregate_statistics(self, stat_col, stat_agg):

FILE: ptp/components/statistics/bleu_statistics.py
  class BLEUStatistics (line 28) | class BLEUStatistics(Component):
    method __init__ (line 36) | def __init__(self, name, config):
    method input_data_definitions (line 77) | def input_data_definitions(self):
    method output_data_definitions (line 98) | def output_data_definitions(self):
    method __call__ (line 107) | def __call__(self, data_streams):
    method calculate_BLEU (line 114) | def calculate_BLEU(self, data_streams):
    method add_statistics (line 179) | def add_statistics(self, stat_col):
    method collect_statistics (line 188) | def collect_statistics(self, stat_col, data_streams):
    method add_aggregators (line 197) | def add_aggregators(self, stat_agg):
    method aggregate_statistics (line 210) | def aggregate_statistics(self, stat_col, stat_agg):

FILE: ptp/components/statistics/precision_recall_statistics.py
  class PrecisionRecallStatistics (line 27) | class PrecisionRecallStatistics(Component):
    method __init__ (line 33) | def __init__(self, name, config):
    method input_data_definitions (line 85) | def input_data_definitions(self):
    method output_data_definitions (line 104) | def output_data_definitions(self):
    method __call__ (line 112) | def __call__(self, data_streams):
    method calculate_statistics (line 153) | def calculate_statistics(self, data_streams):
    method add_statistics (line 233) | def add_statistics(self, stat_col):
    method collect_statistics (line 248) | def collect_statistics(self, stat_col, data_streams):
    method add_aggregators (line 283) | def add_aggregators(self, stat_agg):
    method aggregate_statistics (line 298) | def aggregate_statistics(self, stat_col, stat_agg):

FILE: ptp/components/tasks/image_text_to_class/clevr.py
  class CLEVR (line 34) | class CLEVR(Task):
    method __init__ (line 61) | def __init__(self, name, config):
    method output_data_definitions (line 223) | def output_data_definitions(self):
    method __len__ (line 250) | def __len__(self):
    method load_dataset (line 259) | def load_dataset(self, source_data_file):
    method get_image (line 277) | def get_image(self, img_id):
    method __getitem__ (line 313) | def __getitem__(self, index):
    method collate_fn (line 359) | def collate_fn(self, batch):

FILE: ptp/components/tasks/image_text_to_class/gqa.py
  class GQA (line 34) | class GQA(Task):
    method __init__ (line 51) | def __init__(self, name, config):
    method output_data_definitions (line 157) | def output_data_definitions(self):
    method __len__ (line 184) | def __len__(self):
    method load_dataset (line 193) | def load_dataset(self, source_files):
    method get_image (line 238) | def get_image(self, img_id):
    method __getitem__ (line 274) | def __getitem__(self, index):
    method collate_fn (line 311) | def collate_fn(self, batch):

FILE: ptp/components/tasks/image_text_to_class/vqa_med_2019.py
  class VQAMED2019 (line 39) | class VQAMED2019(Task):
    method __init__ (line 65) | def __init__(self, name, config):
    method output_data_definitions (line 304) | def output_data_definitions(self):
    method __len__ (line 337) | def __len__(self):
    method filter_sources (line 346) | def filter_sources(self, source_files, source_image_folders, source_ca...
    method calculate_and_export_sample_weights (line 376) | def calculate_and_export_sample_weights(self, filename):
    method preprocess_text (line 434) | def preprocess_text(self, text, lowercase = False, remove_punctuation ...
    method random_remove_stop_words (line 482) | def random_remove_stop_words(self, words):
    method random_shuffle_words (line 511) | def random_shuffle_words(self, words):
    method load_dataset (line 538) | def load_dataset(self, source_files, source_image_folders, source_cate...
    method load_testset_with_answers (line 613) | def load_testset_with_answers(self, data_file, image_folder):
    method load_testset_without_answers (line 688) | def load_testset_without_answers(self, data_file, image_folder):
    method get_image (line 756) | def get_image(self, img_id, img_folder):
    method __getitem__ (line 803) | def __getitem__(self, index):
    method predict_yes_no (line 867) | def predict_yes_no(self, qtext):
    method collate_fn (line 879) | def collate_fn(self, batch):

FILE: ptp/components/tasks/image_to_class/cifar_100.py
  class CIFAR100 (line 27) | class CIFAR100(Task):
    method __init__ (line 36) | def __init__(self, name, config):
    method __len__ (line 158) | def __len__(self):
    method output_data_definitions (line 166) | def output_data_definitions(self):
    method __getitem__ (line 182) | def __getitem__(self, index):

FILE: ptp/components/tasks/image_to_class/mnist.py
  class MNIST (line 27) | class MNIST(Task):
    method __init__ (line 42) | def __init__(self, name, config):
    method __len__ (line 114) | def __len__(self):
    method output_data_definitions (line 122) | def output_data_definitions(self):
    method __getitem__ (line 136) | def __getitem__(self, index):

FILE: ptp/components/tasks/image_to_class/simple_molecules.py
  class SimpleMolecules (line 34) | class SimpleMolecules(Task):
    method __init__ (line 40) | def __init__(self, name, config):
    method load_dataset (line 159) | def load_dataset(self, source_data_file):
    method __len__ (line 177) | def __len__(self):
    method output_data_definitions (line 186) | def output_data_definitions(self):
    method get_image (line 199) | def get_image(self, img_id):
    method __getitem__ (line 223) | def __getitem__(self, index):

FILE: ptp/components/tasks/task.py
  class Task (line 26) | class Task(Component, Dataset):
    method __init__ (line 37) | def __init__(self, name, class_type, config):
    method summarize_io (line 78) | def summarize_io(self, priority = -1):
    method __call__ (line 94) | def __call__(self, data_streams):
    method input_data_definitions (line 102) | def input_data_definitions(self):
    method create_data_streams (line 112) | def create_data_streams(self, index, data_definitions = None):
    method collate_fn (line 132) | def collate_fn(self, batch):
    method initialize_epoch (line 159) | def initialize_epoch(self, epoch):
    method finalize_epoch (line 172) | def finalize_epoch(self, epoch):
    method curriculum_learning_initialize (line 186) | def curriculum_learning_initialize(self, curriculum_config):
    method curriculum_learning_update_params (line 204) | def curriculum_learning_update_params(self, episode, epoch):

FILE: ptp/components/tasks/text_to_class/dummy_language_identification.py
  class DummyLanguageIdentification (line 24) | class DummyLanguageIdentification(LanguageIdentification):
    method __init__ (line 31) | def __init__(self, name, config):
    method generate_dataset (line 68) | def generate_dataset(self):

FILE: ptp/components/tasks/text_to_class/language_identification.py
  class LanguageIdentification (line 21) | class LanguageIdentification(Task):
    method __init__ (line 26) | def __init__(self, name, class_type, config):
    method output_data_definitions (line 48) | def output_data_definitions(self):
    method __len__ (line 61) | def __len__(self):
    method __getitem__ (line 70) | def __getitem__(self, index):

FILE: ptp/components/tasks/text_to_class/wily_language_identification.py
  class WiLYLanguageIdentification (line 23) | class WiLYLanguageIdentification(LanguageIdentification):
    method __init__ (line 33) | def __init__(self, name, config):

FILE: ptp/components/tasks/text_to_class/wily_ngram_language_modeling.py
  class WiLYNGramLanguageModeling (line 24) | class WiLYNGramLanguageModeling(Task):
    method __init__ (line 32) | def __init__(self, name, config):
    method output_data_definitions (line 110) | def output_data_definitions(self):
    method __len__ (line 124) | def __len__(self):
    method __getitem__ (line 133) | def __getitem__(self, index):

FILE: ptp/components/tasks/text_to_text/translation_pairs.py
  class TranslationPairs (line 31) | class TranslationPairs(Task):
    method __init__ (line 39) | def __init__(self, name, config):
    method output_data_definitions (line 161) | def output_data_definitions(self):
    method unicodeToAscii (line 176) | def unicodeToAscii(s):
    method normalizeString (line 184) | def normalizeString(self, s):
    method __len__ (line 190) | def __len__(self):
    method __getitem__ (line 199) | def __getitem__(self, index):
    method collate_fn (line 215) | def collate_fn(self, batch):

FILE: ptp/components/tasks/text_to_text/wikitext_language_modeling.py
  class WikiTextLanguageModeling (line 27) | class WikiTextLanguageModeling(Task):
    method __init__ (line 42) | def __init__(self, name, config):
    method output_data_definitions (line 131) | def output_data_definitions(self):
    method __len__ (line 144) | def __len__(self):
    method __getitem__ (line 153) | def __getitem__(self, index):
    method collate_fn (line 170) | def collate_fn(self, batch):

FILE: ptp/components/transforms/concatenate_tensor.py
  class ConcatenateTensor (line 25) | class ConcatenateTensor(Component):
    method __init__ (line 31) | def __init__(self, name, config):
    method input_data_definitions (line 67) | def input_data_definitions(self):
    method output_data_definitions (line 78) | def output_data_definitions(self):
    method __call__ (line 89) | def __call__(self, data_streams):

FILE: ptp/components/transforms/list_to_tensor.py
  class ListToTensor (line 25) | class ListToTensor(Component):
    method __init__ (line 31) | def __init__(self, name, config):
    method input_data_definitions (line 57) | def input_data_definitions(self):
    method output_data_definitions (line 70) | def output_data_definitions(self):
    method __call__ (line 84) | def __call__(self, data_streams):

FILE: ptp/components/transforms/reduce_tensor.py
  class ReduceTensor (line 26) | class ReduceTensor(Component):
    method __init__ (line 32) | def __init__(self, name, config):
    method input_data_definitions (line 75) | def input_data_definitions(self):
    method output_data_definitions (line 91) | def output_data_definitions(self):
    method __call__ (line 116) | def __call__(self, data_streams):

FILE: ptp/components/transforms/reshape_tensor.py
  class ReshapeTensor (line 25) | class ReshapeTensor(Component):
    method __init__ (line 31) | def __init__(self, name, config):
    method input_data_definitions (line 56) | def input_data_definitions(self):
    method output_data_definitions (line 66) | def output_data_definitions(self):
    method __call__ (line 77) | def __call__(self, data_streams):

FILE: ptp/components/viewers/image_viewer.py
  class ImageViewer (line 27) | class ImageViewer(Component):
    method __init__ (line 32) | def __init__(self, name, config):
    method input_data_definitions (line 56) | def input_data_definitions(self):
    method output_data_definitions (line 69) | def output_data_definitions(self):
    method __call__ (line 78) | def __call__(self, data_streams):

FILE: ptp/components/viewers/stream_viewer.py
  class StreamViewer (line 26) | class StreamViewer(Component):
    method __init__ (line 32) | def __init__(self, name, config):
    method input_data_definitions (line 56) | def input_data_definitions(self):
    method output_data_definitions (line 66) | def output_data_definitions(self):
    method __call__ (line 75) | def __call__(self, data_streams):

FILE: ptp/configuration/config_interface.py
  class ConfigInterface (line 26) | class ConfigInterface(Mapping):
    method __init__ (line 43) | def __init__(self, *keys):
    method _lookup (line 76) | def _lookup(self, *keys):
    method _nest_dict (line 99) | def _nest_dict(self, d: dict):
    method to_dict (line 125) | def to_dict(self):
    method __getitem__ (line 132) | def __getitem__(self, key):
    method __len__ (line 150) | def __len__(self):
    method __iter__ (line 158) | def __iter__(self):
    method __eq__ (line 166) | def __eq__(self, other):
    method leafs (line 177) | def leafs(self):
    method set_leaf (line 189) | def set_leaf(self, leaf_key, leaf_value):
    method add_default_params (line 219) | def add_default_params(self, default_params: dict):
    method add_config_params (line 239) | def add_config_params(self, config_params: dict):
    method del_default_params (line 258) | def del_default_params(self, key):
    method del_config_params (line 274) | def del_config_params(self, key):
    method add_config_params_from_yaml (line 290) | def add_config_params_from_yaml(self, yaml_path: str):

FILE: ptp/configuration/config_parsing.py
  function display_globals (line 25) | def display_globals(logger, globals_dict):
  function display_parsing_results (line 43) | def display_parsing_results(logger, parsed_args, unparsed_args):
  function export_experiment_configuration_to_yml (line 72) | def export_experiment_configuration_to_yml(logger, log_dir, filename, co...
  function load_class_default_config_file (line 112) | def load_class_default_config_file(class_type):
  function recurrent_config_parse (line 149) | def recurrent_config_parse(configs_to_parse: list, configs_parsed: list,...
  function reverse_order_config_load (line 211) | def reverse_order_config_load(config_interface_obj, configs_to_load):
  function get_value_list_from_dictionary (line 225) | def get_value_list_from_dictionary(key, parameter_dict, accepted_values ...
  function get_value_from_dictionary (line 258) | def get_value_from_dictionary(key, parameter_dict, accepted_values = []):

FILE: ptp/configuration/config_registry.py
  class MetaSingletonABC (line 27) | class MetaSingletonABC(SingletonMetaClass, ABCMeta):
  class ConfigRegistry (line 35) | class ConfigRegistry(Mapping, metaclass=MetaSingletonABC):
    method __init__ (line 55) | def __init__(self):
    method _clear_registry (line 73) | def _clear_registry(self):
    method _update_params (line 87) | def _update_params(self):
    method add_default_params (line 97) | def add_default_params(self, default_params: dict):
    method add_config_params (line 116) | def add_config_params(self, config_params: dict):
    method del_default_params (line 134) | def del_default_params(self, keypath: list):
    method del_config_params (line 148) | def del_config_params(self, keypath: list):
    method __getitem__ (line 162) | def __getitem__(self, key):
    method __iter__ (line 176) | def __iter__(self):
    method __len__ (line 184) | def __len__(self):
    method __eq__ (line 192) | def __eq__(self, other):
    method update_dict_recursively (line 203) | def update_dict_recursively(self, current_node, update_node):
    method delete_subtree (line 230) | def delete_subtree(current_dict, keypath: list):

FILE: ptp/configuration/configuration_error.py
  class ConfigurationError (line 20) | class ConfigurationError(Exception):
    method __init__ (line 22) | def __init__(self, msg):
    method __str__ (line 26) | def __str__(self):

FILE: ptp/data_types/data_definition.py
  class DataDefinition (line 24) | class DataDefinition(namedtuple("DataDefinition", 'dimensions types desc...

FILE: ptp/data_types/data_streams.py
  class DataStreams (line 23) | class DataStreams(collections.abc.MutableMapping):
    method __init__ (line 35) | def __init__(self, *args, **kwargs):
    method __setitem__ (line 50) | def __setitem__(self, key, value, addkey=False):
    method publish (line 74) | def publish(self, dict_to_add):
    method reinitialize (line 94) | def reinitialize(self, streams_to_leave):
    method __getitem__ (line 108) | def __getitem__(self, key):
    method __delitem__ (line 119) | def __delitem__(self, key, delkey=False):
    method __iter__ (line 141) | def __iter__(self):
    method __len__ (line 144) | def __len__(self):
    method __str__ (line 147) | def __str__(self):
    method __repr__ (line 154) | def __repr__(self):
    method to (line 162) | def to(self, device=None, keys_to_move=None, non_blocking=False):

FILE: ptp/utils/app_state.py
  class AppState (line 27) | class AppState(metaclass=SingletonMetaClass):
    method __init__ (line 47) | def __init__(self):
    method set_types (line 80) | def set_types(self):
    method set_cpu_types (line 101) | def set_cpu_types(self):
    method set_gpu_types (line 115) | def set_gpu_types(self):
    method globalkeys (line 129) | def globalkeys(self):
    method globalitems (line 137) | def globalitems(self):
    method __setitem__ (line 145) | def __setitem__(self, key, value, override=False):
    method __getitem__ (line 169) | def __getitem__(self, key):

FILE: ptp/utils/data_streams_parallel.py
  function data_streams_scatter (line 29) | def data_streams_scatter(inputs, target_gpus, dim=0):
  function data_streams_scatter_kwargs (line 62) | def data_streams_scatter_kwargs(inputs, kwargs, target_gpus, dim=0):
  function data_streams_gather (line 75) | def data_streams_gather(outputs, target_device, dim=0):
  class DataStreamsParallel (line 109) | class DataStreamsParallel(torch.nn.DataParallel):
    method __init__ (line 117) | def __init__(self, module, device_ids=None, output_device=None, dim=0):
    method forward (line 120) | def forward(self, *inputs, **kwargs):
    method replicate (line 159) | def replicate(self, module, device_ids):
    method scatter (line 162) | def scatter(self, inputs, kwargs, device_ids):
    method parallel_apply (line 165) | def parallel_apply(self, replicas, inputs, kwargs):
    method gather (line 168) | def gather(self, outputs, output_device):
    method add_statistics (line 171) | def add_statistics(self, stat_col):
    method collect_statistics (line 180) | def collect_statistics(self, stat_col, data_streams):
    method add_aggregators (line 192) | def add_aggregators(self, stat_agg):
    method aggregate_statistics (line 201) | def aggregate_statistics(self, stat_col, stat_agg):

FILE: ptp/utils/globals_facade.py
  class GlobalsFacade (line 21) | class GlobalsFacade(object):
    method __init__ (line 25) | def __init__(self, key_mappings):
    method __setitem__ (line 35) | def __setitem__(self, key, value):
    method __getitem__ (line 49) | def __getitem__(self, key):

FILE: ptp/utils/key_mappings_facade.py
  class KeyMappingsFacade (line 19) | class KeyMappingsFacade(object):
    method __init__ (line 23) | def __init__(self, key_mappings):
    method __getitem__ (line 32) | def __getitem__(self, key):

FILE: ptp/utils/logger.py
  function initialize_logger (line 24) | def initialize_logger(name, add_file_handler = True):
  function add_file_handler_to_logger (line 69) | def add_file_handler_to_logger(logger):

FILE: ptp/utils/samplers.py
  class kFoldRandomSampler (line 26) | class kFoldRandomSampler(Sampler):
    method __init__ (line 36) | def __init__(self, num_samples, num_folds, epochs_per_fold = 1, all_bu...
    method regenerate_indices (line 82) | def regenerate_indices(self):
    method __iter__ (line 118) | def __iter__(self):
    method __len__ (line 137) | def __len__(self):
  class kFoldWeightedRandomSampler (line 144) | class kFoldWeightedRandomSampler(kFoldRandomSampler):
    method __init__ (line 154) | def __init__(self, weights, num_samples, num_folds, epochs_per_fold = ...
    method __iter__ (line 187) | def __iter__(self):

FILE: ptp/utils/singleton.py
  class SingletonMetaClass (line 20) | class SingletonMetaClass(type):
    method __call__ (line 23) | def __call__(cls, *args, **kwargs):

FILE: ptp/utils/statistics_aggregator.py
  class StatisticsAggregator (line 23) | class StatisticsAggregator(StatisticsCollector):
    method __init__ (line 40) | def __init__(self):
    method add_aggregator (line 52) | def add_aggregator(self, key, formatting):
    method __getitem__ (line 73) | def __getitem__(self, key):
    method __setitem__ (line 85) | def __setitem__(self, key, value):
    method __delitem__ (line 98) | def __delitem__(self, key):
    method __len__ (line 108) | def __len__(self):
    method __eq__ (line 114) | def __eq__(self, other):
    method __iter__ (line 124) | def __iter__(self):
    method initialize_csv_file (line 131) | def initialize_csv_file(self, log_dir, filename):
    method export_to_csv (line 148) | def export_to_csv(self, csv_file=None):
    method export_to_checkpoint (line 182) | def export_to_checkpoint(self):
    method export_to_string (line 201) | def export_to_string(self, additional_tag=''):
    method export_to_tensorboard (line 234) | def export_to_tensorboard(self, tb_writer = None):

FILE: ptp/utils/statistics_collector.py
  class StatisticsCollector (line 23) | class StatisticsCollector(Mapping):
    method __init__ (line 32) | def __init__(self):
    method add_statistics (line 45) | def add_statistics(self, key, formatting):
    method __getitem__ (line 61) | def __getitem__(self, key):
    method __setitem__ (line 73) | def __setitem__(self, key, value):
    method __delitem__ (line 83) | def __delitem__(self, key):
    method __len__ (line 92) | def __len__(self):
    method __iter__ (line 98) | def __iter__(self):
    method __eq__ (line 104) | def __eq__(self, other):
    method empty (line 114) | def empty(self):
    method base_initialize_csv_file (line 123) | def base_initialize_csv_file(self, log_dir, filename, keys):
    method initialize_csv_file (line 160) | def initialize_csv_file(self, log_dir, filename):
    method export_to_csv (line 177) | def export_to_csv(self, csv_file=None):
    method export_to_checkpoint (line 212) | def export_to_checkpoint(self):
    method export_to_string (line 232) | def export_to_string(self, additional_tag=''):
    method initialize_tensorboard (line 266) | def initialize_tensorboard(self, tb_writer):
    method export_to_tensorboard (line 272) | def export_to_tensorboard(self, tb_writer=None):

FILE: ptp/utils/termination_condition.py
  class TerminationCondition (line 20) | class TerminationCondition(Exception):
    method __init__ (line 22) | def __init__(self, msg):
    method __str__ (line 26) | def __str__(self):

FILE: ptp/workers/offline_trainer.py
  class OfflineTrainer (line 27) | class OfflineTrainer(Trainer):
    method __init__ (line 40) | def __init__(self):
    method setup_experiment (line 47) | def setup_experiment(self):
    method run_experiment (line 111) | def run_experiment(self):
  function main (line 352) | def main():

FILE: ptp/workers/online_trainer.py
  class OnlineTrainer (line 27) | class OnlineTrainer(Trainer):
    method __init__ (line 42) | def __init__(self):
    method setup_experiment (line 49) | def setup_experiment(self):
    method run_experiment (line 113) | def run_experiment(self):
  function main (line 355) | def main():

FILE: ptp/workers/processor.py
  class Processor (line 37) | class Processor(Worker):
    method __init__ (line 45) | def __init__(self):
    method setup_global_experiment (line 59) | def setup_global_experiment(self):
    method setup_individual_experiment (line 136) | def setup_individual_experiment(self):
    method initialize_statistics_collection (line 327) | def initialize_statistics_collection(self):
    method finalize_statistics_collection (line 349) | def finalize_statistics_collection(self):
    method run_experiment (line 357) | def run_experiment(self):
  function main (line 435) | def main():

FILE: ptp/workers/test_data_dict_parallel.py
  class RandomDataset (line 16) | class RandomDataset(Task):
    method __init__ (line 18) | def __init__(self, size, length):
    method __getitem__ (line 22) | def __getitem__(self, index):
    method __len__ (line 32) | def __len__(self):
    method output_data_definitions (line 35) | def output_data_definitions(self):
    method collate_fn (line 38) | def collate_fn(self, batch):
  class TestModel1 (line 43) | class TestModel1(Model):
    method __init__ (line 45) | def __init__(self, input_size, output_size):
    method forward (line 49) | def forward(self, datadict):
    method input_data_definitions (line 59) | def input_data_definitions(self):
    method output_data_definitions (line 62) | def output_data_definitions(self):
  class TestModel2 (line 66) | class TestModel2(Model):
    method __init__ (line 68) | def __init__(self, input_size, output_size):
    method forward (line 72) | def forward(self, datadict):
    method input_data_definitions (line 81) | def input_data_definitions(self):
    method output_data_definitions (line 84) | def output_data_definitions(self):

FILE: ptp/workers/trainer.py
  class Trainer (line 38) | class Trainer(Worker):
    method __init__ (line 48) | def __init__(self, name, class_type):
    method setup_experiment (line 97) | def setup_experiment(self):
    method add_statistics (line 385) | def add_statistics(self, stat_col):
    method add_aggregators (line 399) | def add_aggregators(self, stat_agg):
    method initialize_statistics_collection (line 413) | def initialize_statistics_collection(self):
    method finalize_statistics_collection (line 458) | def finalize_statistics_collection(self):
    method initialize_tensorboard (line 470) | def initialize_tensorboard(self):
    method finalize_tensorboard (line 495) | def finalize_tensorboard(self):
    method validate_on_batch (line 509) | def validate_on_batch(self, valid_batch):
    method validate_on_set (line 536) | def validate_on_set(self):

FILE: ptp/workers/worker.py
  class Worker (line 33) | class Worker(object):
    method __init__ (line 39) | def __init__(self, name, class_type, add_default_parser_args = True):
    method setup_experiment (line 158) | def setup_experiment(self):
    method add_statistics (line 185) | def add_statistics(self, stat_col):
    method add_aggregators (line 196) | def add_aggregators(self, stat_agg):
    method run_experiment (line 212) | def run_experiment(self):
    method collect_all_statistics (line 222) | def collect_all_statistics(self, task_mgr, pipeline_mgr, data_streams,...
    method aggregate_all_statistics (line 249) | def aggregate_all_statistics(self, task_mgr, pipeline_mgr, stat_col, s...
    method export_all_statistics (line 273) | def export_all_statistics(self, stat_obj, tag='', export_to_log = True):
    method set_random_seeds (line 297) | def set_random_seeds(self, section_name, config):

FILE: tests/application/pipeline_tests.py
  class TestPipeline (line 27) | class TestPipeline(unittest.TestCase):
    method __init__ (line 29) | def __init__(self, *args, **kwargs):
    method test_create_component_full_type (line 35) | def test_create_component_full_type(self):
    method test_create_component_type (line 55) | def test_create_component_type(self):
    method test_disable_component (line 75) | def test_disable_component(self):
    method test_priorities (line 96) | def test_priorities(self):

FILE: tests/application/sampler_factory_tests.py
  class TestTaskMockup (line 28) | class TestTaskMockup(object):
    method __len__ (line 29) | def __len__(self):
  class TestSamplerFactory (line 32) | class TestSamplerFactory(unittest.TestCase):
    method __init__ (line 34) | def __init__(self, *args, **kwargs):
    method test_create_subset_random_sampler_range (line 37) | def test_create_subset_random_sampler_range(self):
    method test_create_subset_random_sampler_range_str (line 50) | def test_create_subset_random_sampler_range_str(self):
    method test_create_subset_random_sampler_list_of_indices (line 64) | def test_create_subset_random_sampler_list_of_indices(self):
    method test_create_subset_random_sampler_file (line 78) | def test_create_subset_random_sampler_file(self):

FILE: tests/application/samplers_tests.py
  class TestkFoldRandomSampler (line 27) | class TestkFoldRandomSampler(unittest.TestCase):
    method __init__ (line 29) | def __init__(self, *args, **kwargs):
    method test_kfold_random_sampler_current_fold (line 32) | def test_kfold_random_sampler_current_fold(self):
    method test_kfold_random_sampler_current_fold_10epochs (line 71) | def test_kfold_random_sampler_current_fold_10epochs(self):
    method test_kfold_random_sampler_all_but_current_fold (line 98) | def test_kfold_random_sampler_all_but_current_fold(self):
  class TestkFoldWeightedRandomSampler (line 139) | class TestkFoldWeightedRandomSampler(unittest.TestCase):
    method __init__ (line 141) | def __init__(self, *args, **kwargs):
    method test_kfold_weighed_random_sampler_current_fold (line 144) | def test_kfold_weighed_random_sampler_current_fold(self):

FILE: tests/components/component_tests.py
  class MockupTask (line 26) | class MockupTask (Task):
    method __init__ (line 30) | def __init__(self, name, config):
    method output_data_definitions (line 33) | def output_data_definitions(self):
  class MockupComponent (line 39) | class MockupComponent (Component):
    method __init__ (line 43) | def __init__(self, name, config):
  class TestComponent (line 47) | class TestComponent(unittest.TestCase):
    method __init__ (line 49) | def __init__(self, *args, **kwargs):
    method test_create_data_streams_key_present (line 61) | def test_create_data_streams_key_present(self):
    method test_extend_data_streams_key_present (line 71) | def test_extend_data_streams_key_present(self):
    method test_global_set_get (line 82) | def test_global_set_get(self):
    method test_global_overwrite (line 89) | def test_global_overwrite(self):

FILE: tests/components/tasks/clevr_tests.py
  class TestCLEVR (line 28) | class TestCLEVR(unittest.TestCase):
    method test_training_set (line 30) | def test_training_set(self):
    method test_validation_set (line 79) | def test_validation_set(self):
    method test_test_set (line 125) | def test_test_set(self):

FILE: tests/components/tasks/gqa_tests.py
  class TestGQA (line 28) | class TestGQA(unittest.TestCase):
    method test_training_0_split (line 31) | def test_training_0_split(self):
    method test_validation_split (line 74) | def test_validation_split(self):
    method test_test_dev_split (line 117) | def test_test_dev_split(self):
    method test_test_split (line 160) | def test_test_split(self):

FILE: tests/components/tasks/task_tests.py
  class MockupTask (line 26) | class MockupTask (Task):
    method __init__ (line 30) | def __init__(self, name, config):
    method output_data_definitions (line 33) | def output_data_definitions(self):
  class TestTask (line 40) | class TestTask(unittest.TestCase):
    method __init__ (line 42) | def __init__(self, *args, **kwargs):
    method test_crete_data_streams_key_present (line 51) | def test_crete_data_streams_key_present(self):

FILE: tests/configuration/config_interface_tests.py
  class TestConfigInterface (line 23) | class TestConfigInterface(unittest.TestCase):
    method test_default_params (line 25) | def test_default_params(self):
    method test_config_params (line 54) | def test_config_params(self):
    method test_overwrite_params (line 66) | def test_overwrite_params(self):

FILE: tests/configuration/config_registry_tests.py
  class TestConfigRegistry (line 23) | class TestConfigRegistry(unittest.TestCase):
    method test_default_params (line 25) | def test_default_params(self):
    method test_config_params (line 38) | def test_config_params(self):
    method test_overwrite_params (line 51) | def test_overwrite_params(self):

FILE: tests/configuration/handshaking_tests.py
  class MockupComponent (line 26) | class MockupComponent (Component):
    method __init__ (line 30) | def __init__(self):
    method input_data_definitions (line 33) | def input_data_definitions(self):
    method output_data_definitions (line 40) | def output_data_definitions(self):
  class TestHandshaking (line 46) | class TestHandshaking(unittest.TestCase):
    method __init__ (line 48) | def __init__(self, *args, **kwargs):
    method test_handshake_input_definitions_keys (line 57) | def test_handshake_input_definitions_keys(self):
    method test_handshake_input_definitions_dimensions (line 73) | def test_handshake_input_definitions_dimensions(self):
    method test_handshake_input_definitions_types (line 92) | def test_handshake_input_definitions_types(self):
    method test_extension_definitions (line 107) | def test_extension_definitions(self):

FILE: tests/data_types/data_definition_tests.py
  class TestDataDefinition (line 23) | class TestDataDefinition(unittest.TestCase):
    method test_values (line 25) | def test_values(self):
    method test_override (line 34) | def test_override(self):

FILE: tests/data_types/data_streams_tests.py
  class TestDataStreams (line 23) | class TestDataStreams(unittest.TestCase):
    method __init__ (line 25) | def __init__(self, *args, **kwargs):
    method test_keys_present (line 35) | def test_keys_present(self):
    method test_keys_absent (line 47) | def test_keys_absent(self):
    method test_keys_extend (line 55) | def test_keys_extend(self):

FILE: tests/utils/app_state_tests.py
  class TestAppState (line 22) | class TestAppState(unittest.TestCase):
    method test_01keys_present (line 25) | def test_01keys_present(self):
    method test_02keys_present_singleton (line 34) | def test_02keys_present_singleton(self):
    method test_03keys_absent (line 41) | def test_03keys_absent(self):
    method test_04keys_overwrite (line 46) | def test_04keys_overwrite(self):

FILE: tests/utils/statistics_tests.py
  class TestStatistics (line 26) | class TestStatistics(unittest.TestCase):
    method __init__ (line 28) | def __init__(self, *args, **kwargs):
    method test_collector_string (line 31) | def test_collector_string(self):
    method test_aggregator_string (line 67) | def test_aggregator_string(self):
Condensed preview — 278 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (1,370K chars).
[
  {
    "path": ".coveralls.yml",
    "chars": 23,
    "preview": "service_name: travis-ci"
  },
  {
    "path": ".gitignore",
    "chars": 1221,
    "preview": "# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# C extensions\n*.so\n\n# Distribution / packagi"
  },
  {
    "path": ".lgtm.yml",
    "chars": 638,
    "preview": "extraction:\n  python:\n    python_setup:\n      version: 3\n    index:\n      exclude:\n        - .git\n    #after_prepare:\n  "
  },
  {
    "path": ".travis.yml",
    "chars": 2052,
    "preview": "# Copyright (C) tkornuta, IBM Corporation 2019\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you"
  },
  {
    "path": "LICENSE",
    "chars": 11357,
    "preview": "                                 Apache License\n                           Version 2.0, January 2004\n                   "
  },
  {
    "path": "README.md",
    "chars": 15304,
    "preview": "# PyTorchPipe\r\n\r\n![Language](https://img.shields.io/badge/language-Python-blue.svg)\r\n[![GitHub license](https://img.shie"
  },
  {
    "path": "configs/cifar100/cifar100_classification_convnet_softmax.yml",
    "chars": 745,
    "preview": "# Load config defining CIFAR100 tasks for training, validation and testing.\ndefault_configs: cifar100/default_cifar100.y"
  },
  {
    "path": "configs/cifar100/default_cifar100.yml",
    "chars": 2087,
    "preview": "# Training parameters:\ntraining:\n  task: \n    type: CIFAR100\n    batch_size: &b 1024\n    use_train_data: True\n  # Use sa"
  },
  {
    "path": "configs/clevr/clevr_all_vgg_glove_lstm_concat_ffn.yml",
    "chars": 2872,
    "preview": "# Load config defining CLEVR tasks for training, validation and testing.\ndefault_configs: clevr/default_clevr.yml\n\n# Res"
  },
  {
    "path": "configs/clevr/clevr_image_convnet_ffn.yml",
    "chars": 795,
    "preview": "# Load config defining CLEVR tasks for training, validation and testing.\ndefault_configs: clevr/default_clevr.yml\n\n# Def"
  },
  {
    "path": "configs/clevr/clevr_question_glove_lstm.yml",
    "chars": 1469,
    "preview": "# Load config defining CLEVR tasks for training, validation and testing.\ndefault_configs: clevr/default_clevr.yml\n\n# Thi"
  },
  {
    "path": "configs/clevr/default_clevr.yml",
    "chars": 2066,
    "preview": "# Training parameters:\ntraining:\n  task: \n    type: CLEVR\n    batch_size: &b 64\n    split: training\n    #resize_image: ["
  },
  {
    "path": "configs/default/components/language/bow_encoder.yml",
    "chars": 1307,
    "preview": "# This file defines the default values for BOW Encoder.\n\n###############################################################"
  },
  {
    "path": "configs/default/components/language/label_indexer.yml",
    "chars": 2642,
    "preview": "# This file defines the default values for LabelIndexer.\n\n##############################################################"
  },
  {
    "path": "configs/default/components/language/sentence_indexer.yml",
    "chars": 3223,
    "preview": "# This file defines the default values for Sentence Indexer.\n\n##########################################################"
  },
  {
    "path": "configs/default/components/language/sentence_one_hot_encoder.yml",
    "chars": 2338,
    "preview": "# This file defines the default values for Sentence 1-hot Encoder.\n\n####################################################"
  },
  {
    "path": "configs/default/components/language/sentence_tokenizer.yml",
    "chars": 1694,
    "preview": "# This file defines the default values for Sentence Tokenizer.\n\n########################################################"
  },
  {
    "path": "configs/default/components/language/word_decoder.yml",
    "chars": 2328,
    "preview": "# This file defines the default values for Word Decoder.\n\n##############################################################"
  },
  {
    "path": "configs/default/components/losses/nll_loss.yml",
    "chars": 1814,
    "preview": "# This file defines the default values for the NLL Loss.\n\n##############################################################"
  },
  {
    "path": "configs/default/components/masking/join_masked_predictions.yml",
    "chars": 1853,
    "preview": "# This file defines the default values for the Join Masked Predictions component.\n\n#####################################"
  },
  {
    "path": "configs/default/components/masking/string_to_mask.yml",
    "chars": 1749,
    "preview": "# This file defines the default values for the String To Mask component.\n\n##############################################"
  },
  {
    "path": "configs/default/components/models/general_usage/attention_decoder.yml",
    "chars": 2774,
    "preview": "# This file defines the default values for the RNN model.\n\n#############################################################"
  },
  {
    "path": "configs/default/components/models/general_usage/feed_forward_network.yml",
    "chars": 2078,
    "preview": "# This file defines the default values for the Multi-Layer Feed-Forward Network.\n\n######################################"
  },
  {
    "path": "configs/default/components/models/general_usage/recurrent_neural_network.yml",
    "chars": 3381,
    "preview": "# This file defines the default values for the RNN model.\n\n#############################################################"
  },
  {
    "path": "configs/default/components/models/general_usage/seq2seq.yml",
    "chars": 2701,
    "preview": "# This file defines the default values for the RNN model.\n\n#############################################################"
  },
  {
    "path": "configs/default/components/models/language/index_embeddings.yml",
    "chars": 1583,
    "preview": "# This file defines the default values for the Index Embeddings.\n\n######################################################"
  },
  {
    "path": "configs/default/components/models/language/sentence_embeddings.yml",
    "chars": 3292,
    "preview": "# This file defines the default values for the Sentence Embeddings.\n\n###################################################"
  },
  {
    "path": "configs/default/components/models/multi_modal_reasoning/compact_bilinear_pooling.yml",
    "chars": 1864,
    "preview": "# This file defines the default values for the Multimodal Compact Bilinear Pooling model.\n\n#############################"
  },
  {
    "path": "configs/default/components/models/multi_modal_reasoning/factorized_bilinear_pooling.yml",
    "chars": 1839,
    "preview": "# This file defines the default values for the FactorizedBilinearPooling model.\n\n#######################################"
  },
  {
    "path": "configs/default/components/models/multi_modal_reasoning/low_rank_bilinear_pooling.yml",
    "chars": 1734,
    "preview": "# This file defines the default values for the LowRankBilinearPooling model.\n\n##########################################"
  },
  {
    "path": "configs/default/components/models/multi_modal_reasoning/question_driven_attention.yml",
    "chars": 2186,
    "preview": "# This file defines the default values for the QuestionDrivenAttention model.\n\n#########################################"
  },
  {
    "path": "configs/default/components/models/multi_modal_reasoning/relational_network.yml",
    "chars": 2015,
    "preview": "# This file defines the default values for the LowRankBilinearPooling model.\n\n##########################################"
  },
  {
    "path": "configs/default/components/models/multi_modal_reasoning/self_attention.yml",
    "chars": 1656,
    "preview": "# This file defines the default values for the Self_Attention model.\n\n##################################################"
  },
  {
    "path": "configs/default/components/models/vision/convnet_encoder.yml",
    "chars": 2422,
    "preview": "# This file defines the default values for the simple 3-layer ConvNet model.\n\n##########################################"
  },
  {
    "path": "configs/default/components/models/vision/generic_image_encoder.yml",
    "chars": 2118,
    "preview": "# This file defines the default values for the component wrapping (pretrained) Torch Vision models.\n\n###################"
  },
  {
    "path": "configs/default/components/models/vision/lenet5.yml",
    "chars": 1336,
    "preview": "# This file defines the default values for the LeNet5 model.\n\n##########################################################"
  },
  {
    "path": "configs/default/components/publishers/global_variable_publisher.yml",
    "chars": 1366,
    "preview": "# This file defines the default values for the Global Variable Publisher.\n\n#############################################"
  },
  {
    "path": "configs/default/components/publishers/stream_file_exporter.yml",
    "chars": 1654,
    "preview": "# This file defines the default values for the Stream File Exporter.\n\n##################################################"
  },
  {
    "path": "configs/default/components/statistics/accuracy_statistics.yml",
    "chars": 1793,
    "preview": "# This file defines the default values for the Accuracy statistics.\n\n###################################################"
  },
  {
    "path": "configs/default/components/statistics/batch_size_statistics.yml",
    "chars": 1202,
    "preview": "# This file defines the default values for the Batch size statistics.\n\n#################################################"
  },
  {
    "path": "configs/default/components/statistics/bleu_statistics.yml",
    "chars": 2089,
    "preview": "# This file defines the default values for the BLEU statistics.\n\n#######################################################"
  },
  {
    "path": "configs/default/components/statistics/precision_recall_statistics.yml",
    "chars": 2232,
    "preview": "# This file defines the default values for the PrecisionRecall statistics.\n\n############################################"
  },
  {
    "path": "configs/default/components/tasks/image_text_to_class/clevr.yml",
    "chars": 2775,
    "preview": "# This file defines the default values for the CLEVR task.\n\n############################################################"
  },
  {
    "path": "configs/default/components/tasks/image_text_to_class/gqa.yml",
    "chars": 2696,
    "preview": "# This file defines the default values for the GQA task.\n\n##############################################################"
  },
  {
    "path": "configs/default/components/tasks/image_text_to_class/vqa_med_2019.yml",
    "chars": 4740,
    "preview": "# This file defines the default values for the VQAMED2019 task.\n\n#######################################################"
  },
  {
    "path": "configs/default/components/tasks/image_to_class/cifar_100.yml",
    "chars": 2283,
    "preview": "# This file defines the default values for the CIFAR-100 task.\n\n########################################################"
  },
  {
    "path": "configs/default/components/tasks/image_to_class/mnist.yml",
    "chars": 2072,
    "preview": "# This file defines the default values for the MNIST task.\n\n############################################################"
  },
  {
    "path": "configs/default/components/tasks/image_to_class/simple_molecules.yml",
    "chars": 2205,
    "preview": "# This file defines the default values for the MNIST task.\n\n############################################################"
  },
  {
    "path": "configs/default/components/tasks/text_to_class/dummy_language_identification.yml",
    "chars": 1675,
    "preview": "# This file defines the default values for the dummy language identification task.\n\n####################################"
  },
  {
    "path": "configs/default/components/tasks/text_to_class/wily_language_identification.yml",
    "chars": 1673,
    "preview": "# This file defines the default values for the WiLY language identification task.\n\n#####################################"
  },
  {
    "path": "configs/default/components/tasks/text_to_class/wily_ngram_language_modeling.yml",
    "chars": 1654,
    "preview": "# This file defines the default values for the ngram language modeling\n# using WiLY dataset.\n\n##########################"
  },
  {
    "path": "configs/default/components/tasks/text_to_text/translation_pairs.yml",
    "chars": 1791,
    "preview": "# This file defines the default values for the WikiText language modeling.\n\n############################################"
  },
  {
    "path": "configs/default/components/tasks/text_to_text/wikitext_language_modeling.yml",
    "chars": 1805,
    "preview": "# This file defines the default values for the WikiText language modeling.\n\n############################################"
  },
  {
    "path": "configs/default/components/transforms/concatenate_tensor.yml",
    "chars": 1809,
    "preview": "# This file defines the default values for the ConcatenateTensor.\n\n#####################################################"
  },
  {
    "path": "configs/default/components/transforms/list_to_tensor.yml",
    "chars": 1432,
    "preview": "# This file defines the default values for the List to Tensor transformation.\n\n#########################################"
  },
  {
    "path": "configs/default/components/transforms/non_linearity.yml",
    "chars": 1622,
    "preview": "# This file defines the default values for the NonLinearity.\n\n##########################################################"
  },
  {
    "path": "configs/default/components/transforms/reduce_tensor.yml",
    "chars": 1720,
    "preview": "# This file defines the default values for the ReduceTensor transformation.\n\n###########################################"
  },
  {
    "path": "configs/default/components/transforms/reshape_tensor.yml",
    "chars": 1625,
    "preview": "# This file defines the default values for the Tensor Reshaper.\n\n#######################################################"
  },
  {
    "path": "configs/default/components/viewers/image_viewer.yml",
    "chars": 1426,
    "preview": "# This file defines the default values for the ImageViewer.\n\n###########################################################"
  },
  {
    "path": "configs/default/components/viewers/stream_viewer.yml",
    "chars": 1371,
    "preview": "# This file defines the default values for the Stream Viewer.\n\n#########################################################"
  },
  {
    "path": "configs/default/workers/offline_trainer.yml",
    "chars": 4872,
    "preview": "####################################################################\n# Section defining all the default values of parame"
  },
  {
    "path": "configs/default/workers/online_trainer.yml",
    "chars": 4868,
    "preview": "####################################################################\n# Section defining all the default values of parame"
  },
  {
    "path": "configs/default/workers/processor.yml",
    "chars": 2263,
    "preview": "####################################################################\n# Section defining all the default values of parame"
  },
  {
    "path": "configs/mnist/default_mnist.yml",
    "chars": 1810,
    "preview": "# Training parameters:\ntraining:\n  task: \n    type: MNIST\n    batch_size: &b 64\n    use_train_data: True\n    #resize: [3"
  },
  {
    "path": "configs/mnist/mnist_classification_convnet_softmax.yml",
    "chars": 695,
    "preview": "# Load config defining MNIST tasks for training, validation and testing.\ndefault_configs: mnist/default_mnist.yml\n\npipel"
  },
  {
    "path": "configs/mnist/mnist_classification_kfold_softmax.yml",
    "chars": 1393,
    "preview": "# Load config defining MNIST tasks for training, validation and testing.\ndefault_configs: mnist/default_mnist.yml\n\n# Tra"
  },
  {
    "path": "configs/mnist/mnist_classification_lenet5.yml",
    "chars": 562,
    "preview": "# Load config defining MNIST tasks for training, validation and testing.\ndefault_configs: mnist/default_mnist.yml\n\n# Tra"
  },
  {
    "path": "configs/mnist/mnist_classification_softmax.yml",
    "chars": 625,
    "preview": "# Load config defining MNIST tasks for training, validation and testing.\ndefault_configs: mnist/default_mnist.yml\n\npipel"
  },
  {
    "path": "configs/mnist/mnist_classification_vf_2lenet5_2losses.yml",
    "chars": 5584,
    "preview": "# Load config defining MNIST tasks for training, validation and testing.\ndefault_configs: mnist/default_mnist.yml\n\n# Tra"
  },
  {
    "path": "configs/mnist/mnist_classification_vf_shared_convnet_2softmaxes_2losses.yml",
    "chars": 6119,
    "preview": "# Load config defining MNIST tasks for training, validation and testing.\ndefault_configs: mnist/default_mnist.yml\n\n# Tra"
  },
  {
    "path": "configs/molecule_classification/default_molecule_classification.yml",
    "chars": 2105,
    "preview": "# Training parameters:\ntraining:\n  task: \n    type: SimpleMolecules\n    batch_size: &b 64\n    split: training\n    resize"
  },
  {
    "path": "configs/molecule_classification/molecule_classification_convnet_softmax.yml",
    "chars": 1213,
    "preview": "# Load config defining MNIST tasks for training, validation and testing.\ndefault_configs: molecule_classification/defaul"
  },
  {
    "path": "configs/molecule_classification/molecule_classification_vgg16_molecules.yml",
    "chars": 1453,
    "preview": "# Load config defining MNIST tasks for training, validation and testing.\ndefault_configs: molecule_classification/defaul"
  },
  {
    "path": "configs/translation/eng_fra_translation_enc_attndec.yml",
    "chars": 4415,
    "preview": "# This pipeline applied an encoder-decoder GRU with attention on the open Tatoeba translation sentence pairs. \n# Inspire"
  },
  {
    "path": "configs/tutorials/mnist_classification_convnet_softmax.yml",
    "chars": 2587,
    "preview": "# Training parameters:\ntraining:\n  task: \n    type: MNIST\n    batch_size: &b 64\n    use_train_data: True\n  # Use sampler"
  },
  {
    "path": "configs/vqa_med_2019/c1_classification/c1_classification_all_bow_vgg16_concat.yml",
    "chars": 2345,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/c1_classification/defau"
  },
  {
    "path": "configs/vqa_med_2019/c1_classification/c1_classification_all_rnn_vgg16_concat.yml",
    "chars": 2587,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/c1_classification/defau"
  },
  {
    "path": "configs/vqa_med_2019/c1_classification/c1_classification_image_cnn_softmax.yml",
    "chars": 792,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/c1_classification/defau"
  },
  {
    "path": "configs/vqa_med_2019/c1_classification/c1_classification_image_size_softmax.yml",
    "chars": 551,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/c1_classification/defau"
  },
  {
    "path": "configs/vqa_med_2019/c1_classification/c1_classification_question_mimic_rnn.yml",
    "chars": 1024,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/c1_classification/defau"
  },
  {
    "path": "configs/vqa_med_2019/c1_classification/c1_classification_question_onehot_bow.yml",
    "chars": 1119,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/c1_classification/defau"
  },
  {
    "path": "configs/vqa_med_2019/c1_classification/c1_classification_question_rnn.yml",
    "chars": 1003,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/c1_classification/defau"
  },
  {
    "path": "configs/vqa_med_2019/c1_classification/c1_classification_vf_question_rnn_separate_q_categorization.yml",
    "chars": 2922,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/c1_classification/defau"
  },
  {
    "path": "configs/vqa_med_2019/c1_classification/default_c1_classification.yml",
    "chars": 1909,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/default_vqa_med_2019.ym"
  },
  {
    "path": "configs/vqa_med_2019/c2_classification/c2_class_lstm_resnet152_ewm_cat_is.yml",
    "chars": 4281,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/c2_classification/defau"
  },
  {
    "path": "configs/vqa_med_2019/c2_classification/c2_class_lstm_resnet152_rn_cat_is.yml",
    "chars": 4104,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/c2_classification/defau"
  },
  {
    "path": "configs/vqa_med_2019/c2_classification/c2_class_lstm_resnet50_attn_cat_is.yml",
    "chars": 2714,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/c2_classification/defau"
  },
  {
    "path": "configs/vqa_med_2019/c2_classification/c2_class_lstm_resnet50_coattn_mfb_cat_is.yml",
    "chars": 3673,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/c2_classification/defau"
  },
  {
    "path": "configs/vqa_med_2019/c2_classification/c2_class_lstm_resnet50_ewm_cat_is.yml",
    "chars": 4002,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/c2_classification/defau"
  },
  {
    "path": "configs/vqa_med_2019/c2_classification/c2_class_lstm_resnet50_mfb_cat_is.yml",
    "chars": 2806,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/c2_classification/defau"
  },
  {
    "path": "configs/vqa_med_2019/c2_classification/c2_class_lstm_resnet50_rn_cat_is.yml",
    "chars": 4103,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/c2_classification/defau"
  },
  {
    "path": "configs/vqa_med_2019/c2_classification/c2_class_lstm_selfattn.yml",
    "chars": 2409,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/c2_classification/defau"
  },
  {
    "path": "configs/vqa_med_2019/c2_classification/c2_class_lstm_vgg16_rn.yml",
    "chars": 2872,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/c2_classification/defau"
  },
  {
    "path": "configs/vqa_med_2019/c2_classification/c2_class_lstm_vgg16_rn_cat_is.yml",
    "chars": 4040,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/c2_classification/defau"
  },
  {
    "path": "configs/vqa_med_2019/c2_classification/c2_classification_all_rnn_vgg16_concat.yml",
    "chars": 2609,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/c2_classification/defau"
  },
  {
    "path": "configs/vqa_med_2019/c2_classification/c2_classification_all_rnn_vgg16_ewm.yml",
    "chars": 2400,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/c2_classification/defau"
  },
  {
    "path": "configs/vqa_med_2019/c2_classification/c2_classification_all_rnn_vgg16_ewm_size.yml",
    "chars": 3637,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/c2_classification/defau"
  },
  {
    "path": "configs/vqa_med_2019/c2_classification/c2_classification_all_rnn_vgg16_mcb.yml",
    "chars": 2418,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/c2_classification/defau"
  },
  {
    "path": "configs/vqa_med_2019/c2_classification/c2_word_answer_onehot_bow.yml",
    "chars": 1609,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/c2_classification/defau"
  },
  {
    "path": "configs/vqa_med_2019/c2_classification/default_c2_classification.yml",
    "chars": 1919,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/default_vqa_med_2019.ym"
  },
  {
    "path": "configs/vqa_med_2019/c3_classification/c3_classification_all_bow_vgg16_concat.yml",
    "chars": 2345,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/c3_classification/defau"
  },
  {
    "path": "configs/vqa_med_2019/c3_classification/c3_classification_all_concat.yml",
    "chars": 2942,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/c3_classification/defau"
  },
  {
    "path": "configs/vqa_med_2019/c3_classification/c3_classification_all_rnn_vgg16_concat.yml",
    "chars": 2587,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/c3_classification/defau"
  },
  {
    "path": "configs/vqa_med_2019/c3_classification/c3_classification_image_cnn_softmax.yml",
    "chars": 851,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/c3_classification/defau"
  },
  {
    "path": "configs/vqa_med_2019/c3_classification/c3_classification_image_plus_size_concat.yml",
    "chars": 2026,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/c3_classification/defau"
  },
  {
    "path": "configs/vqa_med_2019/c3_classification/c3_classification_image_size_softmax.yml",
    "chars": 550,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/c3_classification/defau"
  },
  {
    "path": "configs/vqa_med_2019/c3_classification/c3_classification_image_softmax.yml",
    "chars": 779,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/c3_classification/defau"
  },
  {
    "path": "configs/vqa_med_2019/c3_classification/c3_classification_image_vgg16_softmax.yml",
    "chars": 592,
    "preview": "  # Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/c3_classification/def"
  },
  {
    "path": "configs/vqa_med_2019/c3_classification/c3_classification_question_onehot_bow.yml",
    "chars": 1120,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/c3_classification/defau"
  },
  {
    "path": "configs/vqa_med_2019/c3_classification/c3_classification_question_rnn.yml",
    "chars": 1001,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/c3_classification/defau"
  },
  {
    "path": "configs/vqa_med_2019/c3_classification/default_c3_classification.yml",
    "chars": 1909,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/default_vqa_med_2019.ym"
  },
  {
    "path": "configs/vqa_med_2019/c4_classification/c4_classification_all_rnn_vgg16_ewm_size.yml",
    "chars": 3647,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/c4_classification/defau"
  },
  {
    "path": "configs/vqa_med_2019/c4_classification/c4_enc_attndec.yml",
    "chars": 4143,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/default_vqa_med_2019.ym"
  },
  {
    "path": "configs/vqa_med_2019/c4_classification/c4_enc_attndec_resnet152_ewm_cat_is.yml",
    "chars": 6327,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/default_vqa_med_2019.ym"
  },
  {
    "path": "configs/vqa_med_2019/c4_classification/c4_frozen_if_gru_dec.yml",
    "chars": 4282,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs:\n  vqa_med_2019/default_vqa_med_2019."
  },
  {
    "path": "configs/vqa_med_2019/c4_classification/c4_word_answer_glove_sum.yml",
    "chars": 2148,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/c4_classification/defau"
  },
  {
    "path": "configs/vqa_med_2019/c4_classification/c4_word_answer_mimic_sum.yml",
    "chars": 2167,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/c4_classification/defau"
  },
  {
    "path": "configs/vqa_med_2019/c4_classification/c4_word_answer_onehot_bow.yml",
    "chars": 1810,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/c4_classification/defau"
  },
  {
    "path": "configs/vqa_med_2019/c4_classification/c4_word_answer_onehot_sum.yml",
    "chars": 2133,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/c4_classification/defau"
  },
  {
    "path": "configs/vqa_med_2019/c4_classification/default_c4_classification.yml",
    "chars": 1911,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/default_vqa_med_2019.ym"
  },
  {
    "path": "configs/vqa_med_2019/default_vqa_med_2019.yml",
    "chars": 1240,
    "preview": "# Training parameters:\ntraining:\n  task:\n    type: &p_type VQAMED2019\n    data_folder: &data_folder ~/data/vqa-med\n    s"
  },
  {
    "path": "configs/vqa_med_2019/evaluation/deepta/glove_gru_resnet50_coattn_mfb_is_cat_ffn_c123_loss.yml",
    "chars": 9736,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/default_vqa_med_2019.ym"
  },
  {
    "path": "configs/vqa_med_2019/evaluation/deepta/glove_gru_vgg16_coattn_mfb_is_cat_ffn_c1234_loss.yml",
    "chars": 9791,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/default_vqa_med_2019.ym"
  },
  {
    "path": "configs/vqa_med_2019/evaluation/example_mimic_lstm_vgg16_ewm_is_cat_ffn_c123_loss.yml",
    "chars": 9343,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs:\n  vqa_med_2019/default_vqa_med_2019."
  },
  {
    "path": "configs/vqa_med_2019/evaluation/frozen_if_ffn_c1234_loss.yml",
    "chars": 4405,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs:\n  vqa_med_2019/default_vqa_med_2019."
  },
  {
    "path": "configs/vqa_med_2019/evaluation/frozen_if_ffn_c123_loss.yml",
    "chars": 4436,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs:\n  vqa_med_2019/default_vqa_med_2019."
  },
  {
    "path": "configs/vqa_med_2019/evaluation/frozen_if_vf_5ffn_c1234yn_5losses.yml",
    "chars": 15114,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs:\n  vqa_med_2019/default_vqa_med_2019."
  },
  {
    "path": "configs/vqa_med_2019/evaluation/frozen_if_vf_5ffn_support_c1234yn_5losses.yml",
    "chars": 17465,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs:\n  vqa_med_2019/default_vqa_med_2019."
  },
  {
    "path": "configs/vqa_med_2019/evaluation/tom/glove_lstm_resnet152_att_is_cat_ffn_c123_loss.yml",
    "chars": 9371,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs:\n  vqa_med_2019/default_vqa_med_2019."
  },
  {
    "path": "configs/vqa_med_2019/evaluation/tom/glove_lstm_resnet152_mcb_is_cat_ffn_c123_loss.yml",
    "chars": 9287,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs:\n  vqa_med_2019/default_vqa_med_2019."
  },
  {
    "path": "configs/vqa_med_2019/evaluation/tom/glove_lstm_vgg16_att_is_cat_ffn_c123_loss.yml",
    "chars": 9370,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs:\n  vqa_med_2019/default_vqa_med_2019."
  },
  {
    "path": "configs/vqa_med_2019/evaluation/tom/glove_lstm_vgg16_ewm_is_cat_ffn_c123_loss.yml",
    "chars": 9273,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs:\n  vqa_med_2019/default_vqa_med_2019."
  },
  {
    "path": "configs/vqa_med_2019/evaluation/tom/glove_lstm_vgg16_mcb_is_cat_ffn_c123_loss.yml",
    "chars": 9262,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs:\n  vqa_med_2019/default_vqa_med_2019."
  },
  {
    "path": "configs/vqa_med_2019/extend_answers.yml",
    "chars": 2581,
    "preview": "# This config is not a standalone config!\n# It adds new sections (sets) without samplers and components for saving answe"
  },
  {
    "path": "configs/vqa_med_2019/extend_answers_c4.yml",
    "chars": 2069,
    "preview": "# This config is not a standalone config!\n# It adds new sections (sets) without samplers and components for saving answe"
  },
  {
    "path": "configs/vqa_med_2019/frozen_pipelines/frozen_input_fusion_glove_lstm_vgg_att_is_cat.yml",
    "chars": 6720,
    "preview": "# Part of pipeline containing components constituting the \"Inputs Fusion\" pipeline.\n\n# Inputs:\n#  streams:\n#   * tokeniz"
  },
  {
    "path": "configs/vqa_med_2019/frozen_pipelines/frozen_question_categorization_glove_rnn_ffn.yml",
    "chars": 3208,
    "preview": "# Part of pipeline containing components constituting the \"Question Categorizer\" pipeline.\n\n# Inputs:\n#   * tokenized_qu"
  },
  {
    "path": "configs/vqa_med_2019/frozen_pipelines/frozen_word_answer_glove_sum.yml",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "configs/vqa_med_2019/frozen_pipelines/input_fusion_processor_io.yml",
    "chars": 1984,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs:\n  vqa_med_2019/frozen_pipelines/froz"
  },
  {
    "path": "configs/vqa_med_2019/question_categorization/default_question_categorization.yml",
    "chars": 1667,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/default_vqa_med_2019.ym"
  },
  {
    "path": "configs/vqa_med_2019/question_categorization/question_categorization_onehot_bow.yml",
    "chars": 1955,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/question_categorization"
  },
  {
    "path": "configs/vqa_med_2019/question_categorization/question_categorization_onehot_rnn.yml",
    "chars": 1206,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/question_categorization"
  },
  {
    "path": "configs/vqa_med_2019/question_categorization/question_categorization_rnn.yml",
    "chars": 990,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/question_categorization"
  },
  {
    "path": "configs/vqa_med_2019/question_categorization/question_categorization_rnn_ffn.yml",
    "chars": 1678,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/question_categorization"
  },
  {
    "path": "configs/vqa_med_2019/vf/c1_binary_vf_cat_hard_shared_question_rnn_two_ffns_losses.yml",
    "chars": 7017,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/default_vqa_med_2019.ym"
  },
  {
    "path": "configs/vqa_med_2019/vf/c1_binary_vf_cat_rnn_shared_all_encoders_two_ffns_losses.yml",
    "chars": 11662,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/default_vqa_med_2019.ym"
  },
  {
    "path": "configs/vqa_med_2019/vf/c1_binary_vf_cat_rnn_shared_question_rnn_two_ffns_losses.yml",
    "chars": 10888,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/default_vqa_med_2019.ym"
  },
  {
    "path": "configs/vqa_med_2019/vf/c1_c2_c3_binary_vf_cat_rnn_shared_all_encoders_four_ffns_losses.yml",
    "chars": 15615,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/default_vqa_med_2019.ym"
  },
  {
    "path": "configs/vqa_med_2019/vf/c1_c3_binary_vf_cat_rnn_shared_all_encoders_three_ffns_losses.yml",
    "chars": 13619,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/default_vqa_med_2019.ym"
  },
  {
    "path": "configs/vqa_med_2019/vf/lstm_resnet152_is_cat_ffn_c123_no_binary_loss.yml",
    "chars": 9079,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/default_vqa_med_2019.ym"
  },
  {
    "path": "configs/vqa_med_2019/vf/lstm_resnet50_ewm_is_cat_ffn_c123_loss_ffn_yn_loss.yml",
    "chars": 13434,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/default_vqa_med_2019.ym"
  },
  {
    "path": "configs/vqa_med_2019/vf/lstm_resnet50_ewm_is_cat_ffn_c123_no_binary_loss.yml",
    "chars": 9827,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/default_vqa_med_2019.ym"
  },
  {
    "path": "configs/vqa_med_2019/vf/lstm_resnet50_is_cat_ffn_c123_no_binary_loss.yml",
    "chars": 8858,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/default_vqa_med_2019.ym"
  },
  {
    "path": "configs/vqa_med_2019/vf/lstm_vgg16_is_cat_ffn_c123_binary_yn_loss.yml",
    "chars": 9027,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/default_vqa_med_2019.ym"
  },
  {
    "path": "configs/vqa_med_2019/vf/lstm_vgg16_is_cat_ffn_c123_no_yn_loss.yml",
    "chars": 9036,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/default_vqa_med_2019.ym"
  },
  {
    "path": "configs/vqa_med_2019/vf/lstm_vgg16_is_cat_ffn_only_yn_loss.yml",
    "chars": 8799,
    "preview": "# Load config defining tasks for training, validation and testing.\ndefault_configs: vqa_med_2019/default_vqa_med_2019.ym"
  },
  {
    "path": "configs/wikitext/wikitext_language_modeling_encoder_attndecoder.yml",
    "chars": 4007,
    "preview": "# This pipeline applies seq2seq on wikitext-2 to make word-level prediction.\n# It's been made for test purposes only, as"
  },
  {
    "path": "configs/wikitext/wikitext_language_modeling_rnn.yml",
    "chars": 2690,
    "preview": "# Training parameters:\ntraining:\n  task:\n    type: &p_type WikiTextLanguageModeling\n    data_folder: &data_folder ~/data"
  },
  {
    "path": "configs/wikitext/wikitext_language_modeling_seq2seq.yml",
    "chars": 4050,
    "preview": "# This pipeline applies seq2seq on wikitext-2 to make word-level prediction.\n# It's been made for test purposes only, as"
  },
  {
    "path": "configs/wikitext/wikitext_language_modeling_seq2seq_simple.yml",
    "chars": 3352,
    "preview": "# This pipeline applies seq2seq on wikitext-2 to make word-level prediction.\n# It's been made for test purposes only, as"
  },
  {
    "path": "configs/wily/dummy_language_identification_bow.yml",
    "chars": 3086,
    "preview": "# Training parameters:\ntraining:\n  task:\n    type: &p_type DummyLanguageIdentification\n    batch_size:  2\n    use_train_"
  },
  {
    "path": "configs/wily/wily_language_identification_bow.yml",
    "chars": 3370,
    "preview": "  # Training parameters:\ntraining:\n  task:\n    type: &p_type WiLYLanguageIdentification\n    data_folder: &data_folder '~"
  },
  {
    "path": "configs/wily/wily_ngram_language_modeling.yml",
    "chars": 3297,
    "preview": "  # Training parameters:\ntraining:\n  task:\n    type: &p_type WiLYNGramLanguageModeling\n    data_folder: &data_folder '~/"
  },
  {
    "path": "ptp/__init__.py",
    "chars": 727,
    "preview": "from .utils import *\n\nfrom .application import *\n\n# Components.\nfrom .components.component import Component\n\nfrom .compo"
  },
  {
    "path": "ptp/application/__init__.py",
    "chars": 283,
    "preview": "from .component_factory import ComponentFactory\nfrom .pipeline_manager import PipelineManager\nfrom .task_manager import "
  },
  {
    "path": "ptp/application/component_factory.py",
    "chars": 2795,
    "preview": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) tkornuta, IBM Corporation 2019\n#\n# Licensed under the Apache License, Version "
  },
  {
    "path": "ptp/application/pipeline_manager.py",
    "chars": 30266,
    "preview": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) tkornuta, IBM Corporation 2019\n#\n# Licensed under the Apache License, Version "
  },
  {
    "path": "ptp/application/sampler_factory.py",
    "chars": 9871,
    "preview": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) tkornuta, IBM Corporation 2019\n#\n# Licensed under the A"
  },
  {
    "path": "ptp/application/task_manager.py",
    "chars": 8288,
    "preview": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) tkornuta, IBM Corporation 2019\n#\n# Licensed under the Apache License, Version "
  },
  {
    "path": "ptp/components/component.py",
    "chars": 10857,
    "preview": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) IBM Corporation 2019\n#\n# Licensed under the Apache License, Version 2.0 (the \""
  },
  {
    "path": "ptp/components/language/__init__.py",
    "chars": 422,
    "preview": "from .bow_encoder import BOWEncoder\nfrom .label_indexer import LabelIndexer\nfrom .sentence_indexer import SentenceIndexe"
  },
  {
    "path": "ptp/components/language/bow_encoder.py",
    "chars": 4374,
    "preview": "# Copyright (C) tkornuta, IBM Corporation 2019\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you"
  },
  {
    "path": "ptp/components/language/label_indexer.py",
    "chars": 3964,
    "preview": "# Copyright (C) tkornuta, IBM Corporation 2019\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you"
  },
  {
    "path": "ptp/components/language/sentence_indexer.py",
    "chars": 9466,
    "preview": "# Copyright (C) tkornuta, IBM Corporation 2019\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you"
  },
  {
    "path": "ptp/components/language/sentence_one_hot_encoder.py",
    "chars": 4091,
    "preview": "# Copyright (C) tkornuta, IBM Corporation 2019\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you"
  },
  {
    "path": "ptp/components/language/sentence_tokenizer.py",
    "chars": 6062,
    "preview": "# Copyright (C) tkornuta, IBM Corporation 2019\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you"
  },
  {
    "path": "ptp/components/language/word_decoder.py",
    "chars": 3696,
    "preview": "# Copyright (C) tkornuta, IBM Corporation 2019\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you"
  },
  {
    "path": "ptp/components/losses/__init__.py",
    "chars": 99,
    "preview": "from .loss import Loss\nfrom .nll_loss import NLLLoss\n\n__all__ = [\n    'NLLLoss',\n    'Loss',\n    ]\n"
  },
  {
    "path": "ptp/components/losses/loss.py",
    "chars": 4257,
    "preview": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) tkornuta, IBM Corporation 2019\n#\n# Licensed under the Apache License, Version "
  },
  {
    "path": "ptp/components/losses/nll_loss.py",
    "chars": 4729,
    "preview": "# Copyright (C) tkornuta, IBM Corporation 2019\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you"
  },
  {
    "path": "ptp/components/masking/__init__.py",
    "chars": 168,
    "preview": "from .join_masked_predictions import JoinMaskedPredictions\nfrom .string_to_mask import StringToMask\n\n__all__ = [\n    'Jo"
  },
  {
    "path": "ptp/components/masking/join_masked_predictions.py",
    "chars": 7801,
    "preview": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) tkornuta, IBM Corporation 2019\n#\n# Licensed under the Apache License, Version "
  },
  {
    "path": "ptp/components/masking/string_to_mask.py",
    "chars": 3520,
    "preview": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) tkornuta, IBM Corporation 2019\n#\n# Licensed under the Apache License, Version "
  },
  {
    "path": "ptp/components/mixins/embeddings.py",
    "chars": 6974,
    "preview": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) tkornuta, IBM Corporation 2019\n#\n# Licensed under the A"
  },
  {
    "path": "ptp/components/mixins/io.py",
    "chars": 8038,
    "preview": "# Copyright (C) tkornuta, IBM Corporation 2019\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you"
  },
  {
    "path": "ptp/components/mixins/word_mappings.py",
    "chars": 7196,
    "preview": "# Copyright (C) tkornuta, IBM Corporation 2019\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you"
  },
  {
    "path": "ptp/components/models/__init__.py",
    "chars": 1564,
    "preview": "from .model import Model\n\n# General usage\nfrom .general_usage.feed_forward_network import FeedForwardNetwork\nfrom .gener"
  },
  {
    "path": "ptp/components/models/general_usage/attention_decoder.py",
    "chars": 11427,
    "preview": "# Copyright (C) Alexis Asseman, IBM Corporation 2019\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");"
  },
  {
    "path": "ptp/components/models/general_usage/feed_forward_network.py",
    "chars": 6894,
    "preview": "# Copyright (C) tkornuta, IBM Corporation 2019\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you"
  },
  {
    "path": "ptp/components/models/general_usage/recurrent_neural_network.py",
    "chars": 19408,
    "preview": "# Copyright (C) tkornuta, IBM Corporation 2019\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you"
  },
  {
    "path": "ptp/components/models/general_usage/seq2seq.py",
    "chars": 9578,
    "preview": "# Copyright (C) aasseman, IBM Corporation 2019\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you"
  },
  {
    "path": "ptp/components/models/language/index_embeddings.py",
    "chars": 3667,
    "preview": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) tkornuta, IBM Corporation 2019\n#\n# Licensed under the A"
  },
  {
    "path": "ptp/components/models/language/sentence_embeddings.py",
    "chars": 6029,
    "preview": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) tkornuta, IBM Corporation 2019\n#\n# Licensed under the A"
  },
  {
    "path": "ptp/components/models/model.py",
    "chars": 6442,
    "preview": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) tkornuta, IBM Corporation 2019\n#\n# Licensed under the Apache License, Version "
  },
  {
    "path": "ptp/components/models/multi_modal_reasoning/compact_bilinear_pooling.py",
    "chars": 7556,
    "preview": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) IBM Corporation 2019\n#\n# Licensed under the Apache Lice"
  },
  {
    "path": "ptp/components/models/multi_modal_reasoning/factorized_bilinear_pooling.py",
    "chars": 5561,
    "preview": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) IBM Corporation 2019\n#\n# Licensed under the Apache Lice"
  },
  {
    "path": "ptp/components/models/multi_modal_reasoning/low_rank_bilinear_pooling.py",
    "chars": 4778,
    "preview": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) IBM Corporation 2019\n#\n# Licensed under the Apache Lice"
  },
  {
    "path": "ptp/components/models/multi_modal_reasoning/question_driven_attention.py",
    "chars": 8415,
    "preview": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) IBM Corporation 2019\n#\n# Licensed under the Apache Lice"
  },
  {
    "path": "ptp/components/models/multi_modal_reasoning/relational_network.py",
    "chars": 7406,
    "preview": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) IBM Corporation 2019\n#\n# Licensed under the Apache Lice"
  },
  {
    "path": "ptp/components/models/multi_modal_reasoning/self_attention.py",
    "chars": 4722,
    "preview": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) IBM Corporation 2019\n#\n# Licensed under the Apache Lice"
  }
]

// ... and 78 more files (download for full content)

About this extraction

This page contains the full source code of the IBM/pytorchpipe GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 278 files (1.2 MB), approximately 305.4k tokens, and a symbol index with 655 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.

Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.

Copied to clipboard!