master f313e089050d cached
323 files
2.8 MB
746.2k tokens
1333 symbols
1 requests
Download .txt
Showing preview only (2,987K chars total). Download the full file or copy to clipboard to get everything.
Repository: santhoshkolloju/Abstractive-Summarization-With-Transfer-Learning
Branch: master
Commit: f313e089050d
Files: 323
Total size: 2.8 MB

Directory structure:
gitextract_ydk6seuh/

├── .ipynb_checkpoints/
│   └── Untitled-checkpoint.ipynb
├── BERT_SUMM.ipynb
├── Inference.py
├── Readme.md
├── bnb_4bit_training.ipynb
├── config.py
├── data/
│   ├── eval.tf_record
│   ├── eval_story.txt
│   ├── eval_summ.txt
│   ├── train.tf_record
│   ├── train_story.txt
│   └── train_summ.txt
├── main.py
├── model.py
├── models/
│   └── logging.txt
├── preprocess.py
├── texar_repo/
│   ├── .gitignore
│   ├── .pylintrc
│   ├── .travis.yml
│   ├── CHANGELOG.md
│   ├── LICENSE
│   ├── README.md
│   ├── bin/
│   │   ├── average_checkpoints.py
│   │   ├── train.py
│   │   └── utils/
│   │       ├── README.md
│   │       ├── apply_bpe
│   │       ├── learn_bpe
│   │       ├── make_vocab.py
│   │       ├── multi-bleu.perl
│   │       ├── spm_decode
│   │       ├── spm_encode
│   │       └── spm_train
│   ├── config.py
│   ├── docs/
│   │   ├── Makefile
│   │   ├── _static/
│   │   │   └── css/
│   │   │       └── custom_theme.css
│   │   ├── code/
│   │   │   ├── agents.rst
│   │   │   ├── context.rst
│   │   │   ├── core.rst
│   │   │   ├── data.rst
│   │   │   ├── evals.rst
│   │   │   ├── hyperparams.rst
│   │   │   ├── losses.rst
│   │   │   ├── models.rst
│   │   │   ├── modules.rst
│   │   │   ├── run.rst
│   │   │   ├── txtgen.rst
│   │   │   └── utils.rst
│   │   ├── conf.py
│   │   ├── examples.md
│   │   ├── get_started.md
│   │   ├── index.rst
│   │   ├── make.bat
│   │   ├── requirements.txt
│   │   └── tutorials/
│   │       └── tutorial.rst
│   ├── examples/
│   │   ├── README.md
│   │   ├── bert/
│   │   │   ├── README.md
│   │   │   ├── bert_classifier_main.py
│   │   │   ├── bert_config_lib/
│   │   │   │   ├── README.md
│   │   │   │   ├── __init__.py
│   │   │   │   └── config_model_uncased_L-12_H-768_A-12.py
│   │   │   ├── config_classifier.py
│   │   │   ├── config_data_mrpc.py
│   │   │   ├── config_data_sst.py
│   │   │   └── utils/
│   │   │       ├── data_utils.py
│   │   │       ├── model_utils.py
│   │   │       └── tokenization.py
│   │   ├── distributed_gpu/
│   │   │   ├── README.md
│   │   │   ├── config_large.py
│   │   │   ├── config_medium.py
│   │   │   ├── config_small.py
│   │   │   ├── lm_ptb_distributed.py
│   │   │   └── ptb_reader.py
│   │   ├── hierarchical_dialog/
│   │   │   ├── README.md
│   │   │   ├── config_data.py
│   │   │   ├── config_model_biminor.py
│   │   │   ├── config_model_uniminor.py
│   │   │   ├── hred.py
│   │   │   └── sw_loader.py
│   │   ├── language_model_ptb/
│   │   │   ├── README.md
│   │   │   ├── config_large.py
│   │   │   ├── config_medium.py
│   │   │   ├── config_small.py
│   │   │   ├── lm_ptb.py
│   │   │   └── ptb_reader.py
│   │   ├── memory_network_lm/
│   │   │   ├── README.md
│   │   │   ├── config.py
│   │   │   ├── lm_ptb_memnet.py
│   │   │   └── ptb_reader.py
│   │   ├── rl_gym/
│   │   │   ├── README.md
│   │   │   ├── ac_cartpole.py
│   │   │   ├── config.py
│   │   │   ├── dqn_cartpole.py
│   │   │   └── pg_cartpole.py
│   │   ├── sentence_classifier/
│   │   │   ├── README.md
│   │   │   ├── clas_main.py
│   │   │   ├── config_kim.py
│   │   │   └── sst_data_preprocessor.py
│   │   ├── seq2seq_attn/
│   │   │   ├── README.md
│   │   │   ├── config_iwslt14.py
│   │   │   ├── config_model.py
│   │   │   ├── config_model_full.py
│   │   │   ├── config_toy_copy.py
│   │   │   ├── prepare_data.py
│   │   │   └── seq2seq_attn.py
│   │   ├── seq2seq_configs/
│   │   │   ├── README.md
│   │   │   ├── config_data_toy_copy.yml
│   │   │   ├── config_model_medium.yml
│   │   │   └── config_model_small.yml
│   │   ├── seq2seq_exposure_bias/
│   │   │   ├── README.md
│   │   │   ├── baseline_seq2seq_attn_main.py
│   │   │   ├── configs/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── config_giga.py
│   │   │   │   ├── config_iwslt14.py
│   │   │   │   └── config_model.py
│   │   │   ├── interpolation_decoder.py
│   │   │   ├── interpolation_helper.py
│   │   │   ├── interpolation_main.py
│   │   │   ├── raml_main.py
│   │   │   ├── requirements.txt
│   │   │   ├── scheduled_sampling_main.py
│   │   │   └── utils/
│   │   │       ├── prepare_data.py
│   │   │       └── raml_samples_generation/
│   │   │           ├── README.md
│   │   │           ├── gen_samples_giga.sh
│   │   │           ├── gen_samples_iwslt14.sh
│   │   │           ├── process_samples.py
│   │   │           ├── util.py
│   │   │           └── vocab.py
│   │   ├── seq2seq_rl/
│   │   │   ├── README.md
│   │   │   ├── config_iwslt14.py
│   │   │   ├── config_model.py
│   │   │   ├── config_toy_copy.py
│   │   │   ├── prepare_data.py
│   │   │   └── seq2seq_attn_pg.py
│   │   ├── seqgan/
│   │   │   ├── README.md
│   │   │   ├── config_coco.py
│   │   │   ├── config_ptb_large.py
│   │   │   ├── config_ptb_medium.py
│   │   │   ├── config_ptb_small.py
│   │   │   ├── data_utils.py
│   │   │   └── seqgan_train.py
│   │   ├── sequence_tagging/
│   │   │   ├── README.md
│   │   │   ├── config.py
│   │   │   ├── conll_reader.py
│   │   │   ├── conll_writer.py
│   │   │   ├── conlleval
│   │   │   ├── ner.py
│   │   │   └── scores.py
│   │   ├── text_style_transfer/
│   │   │   ├── README.md
│   │   │   ├── config.py
│   │   │   ├── ctrl_gen_model.py
│   │   │   ├── main.py
│   │   │   └── prepare_data.py
│   │   ├── torchtext/
│   │   │   ├── .gitignore
│   │   │   ├── README.md
│   │   │   ├── batchfirst_bptt.py
│   │   │   ├── config_small.py
│   │   │   ├── lm_torchtext.py
│   │   │   └── requirements.txt
│   │   ├── transformer/
│   │   │   ├── README.md
│   │   │   ├── bleu_tool.py
│   │   │   ├── config_iwslt15.py
│   │   │   ├── config_model.py
│   │   │   ├── config_wmt14.py
│   │   │   ├── preprocess_data.sh
│   │   │   ├── requirements.txt
│   │   │   ├── scripts/
│   │   │   │   ├── iwslt15_en_vi.sh
│   │   │   │   └── wmt14_en_de.sh
│   │   │   ├── transformer_main.py
│   │   │   └── utils/
│   │   │       ├── __init__.py
│   │   │       ├── data_utils.py
│   │   │       ├── preprocess.py
│   │   │       └── utils.py
│   │   └── vae_text/
│   │       ├── README.md
│   │       ├── config_lstm_ptb.py
│   │       ├── config_lstm_yahoo.py
│   │       ├── config_trans_ptb.py
│   │       ├── config_trans_yahoo.py
│   │       ├── prepare_data.py
│   │       └── vae_train.py
│   ├── requirements.txt
│   ├── setup.py
│   └── texar/
│       ├── __init__.py
│       ├── agents/
│       │   ├── __init__.py
│       │   ├── ac_agent.py
│       │   ├── agent_base.py
│       │   ├── agent_gym_utils.py
│       │   ├── agent_utils.py
│       │   ├── agent_utils_test.py
│       │   ├── dqn_agent.py
│       │   ├── episodic_agent_base.py
│       │   ├── pg_agent.py
│       │   ├── seq_agent_base.py
│       │   ├── seq_pg_agent.py
│       │   └── seq_pg_agent_test.py
│       ├── context.py
│       ├── context_test.py
│       ├── core/
│       │   ├── __init__.py
│       │   ├── explorations.py
│       │   ├── layers.py
│       │   ├── layers_test.py
│       │   ├── optimization.py
│       │   ├── optimization_test.py
│       │   └── replay_memories.py
│       ├── data/
│       │   ├── __init__.py
│       │   ├── data/
│       │   │   ├── __init__.py
│       │   │   ├── data_base.py
│       │   │   ├── data_iterators.py
│       │   │   ├── data_iterators_test.py
│       │   │   ├── dataset_utils.py
│       │   │   ├── dataset_utils_test.py
│       │   │   ├── mono_text_data.py
│       │   │   ├── mono_text_data_test.py
│       │   │   ├── multi_aligned_data.py
│       │   │   ├── multi_aligned_data_test.py
│       │   │   ├── paired_text_data.py
│       │   │   ├── paired_text_data_test.py
│       │   │   ├── scalar_data.py
│       │   │   ├── scalar_data_test.py
│       │   │   └── text_data_base.py
│       │   ├── data_decoders.py
│       │   ├── data_utils.py
│       │   ├── data_utils_test.py
│       │   ├── embedding.py
│       │   ├── embedding_test.py
│       │   ├── vocabulary.py
│       │   └── vocabulary_test.py
│       ├── evals/
│       │   ├── __init__.py
│       │   ├── bleu.py
│       │   ├── bleu_moses.py
│       │   ├── bleu_test.py
│       │   └── metrics.py
│       ├── hyperparams.py
│       ├── hyperparams_test.py
│       ├── losses/
│       │   ├── __init__.py
│       │   ├── adv_losses.py
│       │   ├── adv_losses_test.py
│       │   ├── entropy.py
│       │   ├── losses_utils.py
│       │   ├── mle_losses.py
│       │   ├── mle_losses_test.py
│       │   ├── pg_losses.py
│       │   ├── rewards.py
│       │   ├── rewards_test.py
│       │   └── rl_losses.py
│       ├── models/
│       │   ├── __init__.py
│       │   ├── model_base.py
│       │   └── seq2seq/
│       │       ├── __init__.py
│       │       ├── basic_seq2seq.py
│       │       └── seq2seq_base.py
│       ├── module_base.py
│       ├── modules/
│       │   ├── __init__.py
│       │   ├── classifiers/
│       │   │   ├── __init__.py
│       │   │   ├── classifier_base.py
│       │   │   ├── conv_classifiers.py
│       │   │   ├── conv_classifiers_test.py
│       │   │   ├── rnn_classifiers.py
│       │   │   └── rnn_classifiers_test.py
│       │   ├── connectors/
│       │   │   ├── __init__.py
│       │   │   ├── connector_base.py
│       │   │   ├── connectors.py
│       │   │   └── connectors_test.py
│       │   ├── decoders/
│       │   │   ├── __init__.py
│       │   │   ├── beam_search_decode.py
│       │   │   ├── beam_search_decode_test.py
│       │   │   ├── rnn_decoder_base.py
│       │   │   ├── rnn_decoder_helpers.py
│       │   │   ├── rnn_decoders.py
│       │   │   ├── rnn_decoders_test.py
│       │   │   ├── transformer_decoders.py
│       │   │   └── transformer_decoders_test.py
│       │   ├── embedders/
│       │   │   ├── __init__.py
│       │   │   ├── embedder_base.py
│       │   │   ├── embedder_utils.py
│       │   │   ├── embedder_utils_test.py
│       │   │   ├── embedders.py
│       │   │   ├── embedders_test.py
│       │   │   └── position_embedders.py
│       │   ├── encoders/
│       │   │   ├── __init__.py
│       │   │   ├── conv_encoders.py
│       │   │   ├── conv_encoders_test.py
│       │   │   ├── encoder_base.py
│       │   │   ├── hierarchical_encoders.py
│       │   │   ├── hierarchical_encoders_test.py
│       │   │   ├── multihead_attention.py
│       │   │   ├── rnn_encoders.py
│       │   │   ├── rnn_encoders_test.py
│       │   │   └── transformer_encoders.py
│       │   ├── memory/
│       │   │   ├── __init__.py
│       │   │   ├── embed_fns.py
│       │   │   ├── memory_network.py
│       │   │   └── memory_network_test.py
│       │   ├── networks/
│       │   │   ├── __init__.py
│       │   │   ├── conv_networks.py
│       │   │   ├── conv_networks_test.py
│       │   │   ├── network_base.py
│       │   │   ├── networks.py
│       │   │   └── networks_test.py
│       │   ├── policies/
│       │   │   ├── __init__.py
│       │   │   ├── policy_nets.py
│       │   │   └── policy_nets_test.py
│       │   └── qnets/
│       │       ├── __init__.py
│       │       └── qnets.py
│       ├── run/
│       │   ├── __init__.py
│       │   ├── executor.py
│       │   └── executor_test.py
│       └── utils/
│           ├── __init__.py
│           ├── average_recorder.py
│           ├── average_recorder_test.py
│           ├── beam_search.py
│           ├── dtypes.py
│           ├── exceptions.py
│           ├── mode.py
│           ├── mode_test.py
│           ├── shapes.py
│           ├── shapes_test.py
│           ├── transformer_attentions.py
│           ├── transformer_utils.py
│           ├── utils.py
│           ├── utils_io.py
│           ├── utils_test.py
│           └── variables.py
└── uncased_L-12_H-768_A-12/
    ├── bert_config.json
    └── vocab.txt

================================================
FILE CONTENTS
================================================

================================================
FILE: .ipynb_checkpoints/Untitled-checkpoint.ipynb
================================================
{
 "cells": [],
 "metadata": {},
 "nbformat": 4,
 "nbformat_minor": 2
}


================================================
FILE: BERT_SUMM.ipynb
================================================
{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "name": "BERT SUMM.ipynb",
      "version": "0.3.2",
      "provenance": [],
      "include_colab_link": true
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "accelerator": "GPU"
  },
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "view-in-github",
        "colab_type": "text"
      },
      "source": [
        "<a href=\"https://colab.research.google.com/github/santhoshkolloju/bert_summ/blob/master/BERT_SUMM.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
      ]
    },
    {
      "metadata": {
        "id": "pbgEu1oyhPca",
        "colab_type": "code",
        "outputId": "1ecf3d0e-75c4-4f0d-8a64-18fc6bcfb4a1",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 34
        }
      },
      "cell_type": "code",
      "source": [
        "import tensorflow as tf\n",
        "device_name = tf.test.gpu_device_name()\n",
        "if device_name != '/device:GPU:0':\n",
        "  raise SystemError('GPU device not found')\n",
        "print('Found GPU at: {}'.format(device_name))"
      ],
      "execution_count": 1,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Found GPU at: /device:GPU:0\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "metadata": {
        "id": "UqeTD_eGkxVd",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "from google.colab import auth\n",
        "auth.authenticate_user()"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "SFCZikuuibh7",
        "colab_type": "code",
        "outputId": "f228c486-ff0b-4740-c9c2-10a2d9b893ba",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 267
        }
      },
      "cell_type": "code",
      "source": [
        "import sys\n",
        "\n",
        "!test -d texar_repo || git clone https://github.com/asyml/texar.git texar_repo\n",
        "if not 'texar_repo' in sys.path:\n",
        "  sys.path += ['texar_repo']\n",
        "!pip install funcsigs"
      ],
      "execution_count": 3,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Cloning into 'texar_repo'...\n",
            "remote: Enumerating objects: 10204, done.\u001b[K\n",
            "remote: Total 10204 (delta 0), reused 0 (delta 0), pack-reused 10204\u001b[K\n",
            "Receiving objects: 100% (10204/10204), 2.30 MiB | 12.21 MiB/s, done.\n",
            "Resolving deltas: 100% (7774/7774), done.\n",
            "Collecting funcsigs\n",
            "  Downloading https://files.pythonhosted.org/packages/69/cb/f5be453359271714c01b9bd06126eaf2e368f1fddfff30818754b5ac2328/funcsigs-1.0.2-py2.py3-none-any.whl\n",
            "Installing collected packages: funcsigs\n",
            "Successfully installed funcsigs-1.0.2\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "metadata": {
        "id": "_IB17muzsBd0",
        "colab_type": "code",
        "outputId": "34a14a15-84b6-47d0-d286-2ae052c858d0",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 74
        }
      },
      "cell_type": "code",
      "source": [
        "\"\"\"\n",
        "from google_drive_downloader import GoogleDriveDownloader as gdd\n",
        "\n",
        "gdd.download_file_from_google_drive(file_id='0BwmD_VLjROrfTHk4NFg2SndKcjQ',\n",
        "                                    dest_path='./cnn_stories.tgz',\n",
        "                                    unzip=True)\n",
        "!tar -zxf cnn_stories.tgz\n",
        "\n",
        "from os import listdir\n",
        "import string\n",
        "\n",
        "# load doc into memory\n",
        "def load_doc(filename):\n",
        "\t# open the file as read only\n",
        "\tfile = open(filename, encoding='utf-8')\n",
        "\t# read all text\n",
        "\ttext = file.read()\n",
        "\t# close the file\n",
        "\tfile.close()\n",
        "\treturn text\n",
        "\n",
        "# split a document into news story and highlights\n",
        "def split_story(doc):\n",
        "\t# find first highlight\n",
        "\tindex = doc.find('@highlight')\n",
        "\t# split into story and highlights\n",
        "\tstory, highlights = doc[:index], doc[index:].split('@highlight')\n",
        "\t# strip extra white space around each highlight\n",
        "\thighlights = [h.strip() for h in highlights if len(h) > 0]\n",
        "\treturn story, highlights\n",
        "\n",
        "# load all stories in a directory\n",
        "def load_stories(directory):\n",
        "\tstories = list()\n",
        "\tfor name in listdir(directory):\n",
        "\t\tfilename = directory + '/' + name\n",
        "\t\t# load document\n",
        "\t\tdoc = load_doc(filename)\n",
        "\t\t# split into story and highlights\n",
        "\t\tstory, highlights = split_story(doc)\n",
        "\t\t# store\n",
        "\t\tstories.append({'story':story, 'highlights':highlights})\n",
        "\treturn stories\n",
        "\n",
        "# clean a list of lines\n",
        "def clean_lines(lines):\n",
        "\tcleaned = list()\n",
        "\t# prepare a translation table to remove punctuation\n",
        "\ttable = str.maketrans('', '', string.punctuation)\n",
        "\tfor line in lines:\n",
        "\t\t# strip source cnn office if it exists\n",
        "\t\tindex = line.find('(CNN) -- ')\n",
        "\t\tif index > -1:\n",
        "\t\t\tline = line[index+len('(CNN)'):]\n",
        "\t\t# tokenize on white space\n",
        "\t\tline = line.split()\n",
        "\t\t# convert to lower case\n",
        "\t\tline = [word.lower() for word in line]\n",
        "\t\t# remove punctuation from each token\n",
        "\t\tline = [w.translate(table) for w in line]\n",
        "\t\t# remove tokens with numbers in them\n",
        "\t\tline = [word for word in line if word.isalpha()]\n",
        "\t\t# store as string\n",
        "\t\tcleaned.append(' '.join(line))\n",
        "\t# remove empty strings\n",
        "\tcleaned = [c for c in cleaned if len(c) > 0]\n",
        "\treturn cleaned\n",
        "\n",
        "# load stories\n",
        "directory = 'cnn/stories/'\n",
        "stories = load_stories(directory)\n",
        "print('Loaded Stories %d' % len(stories))\n",
        "\n",
        "# clean stories\n",
        "f1 = open(\"stories.txt\",'w')\n",
        "f2 = open(\"summary.txt\",'w')\n",
        "for example in stories:\n",
        "  example['story'] = clean_lines(example['story'].split('\\n'))\n",
        "  example['highlights'] = clean_lines(example['highlights'])\n",
        "  f1.write(\" \".join(example['story']))\n",
        "  f1.write(\"\\n\")\n",
        "  f2.write(\" \".join(example['highlights']))\n",
        "  f2.write(\"\\n\")\n",
        "f1.close()\n",
        "f2.close()\n",
        "  \n",
        "story = open(\"stories.txt\").readlines()\n",
        "summ = open(\"summary.txt\").readlines() \n",
        "train_story = story[0:90000]\n",
        "train_summ = summ[0:90000]\n",
        "\n",
        "eval_story = story[90000:91579]\n",
        "eval_summ = summ[90000:91579]\n",
        "\n",
        "\n",
        "test_story = story[91579:92579]\n",
        "test_summ = summ[91579:92579]\n",
        "\n",
        "\n",
        "with open(\"train_story.txt\",'w') as f:\n",
        "  f.write(\"\\n\".join(train_story))\n",
        "  \n",
        "with open(\"train_summ.txt\",'w') as f:\n",
        "  f.write(\"\\n\".join(train_summ))\n",
        "  \n",
        "with open(\"eval_story.txt\",'w') as f:\n",
        "  f.write(\"\\n\".join(eval_story))\n",
        "  \n",
        "  \n",
        "with open(\"eval_summ.txt\",'w') as f:\n",
        "  f.write(\"\\n\".join(eval_summ))\n",
        "  \n",
        "  \n",
        "with open(\"test_story.txt\",'w') as f:\n",
        "  f.write(\"\\n\".join(test_story))\n",
        "  \n",
        "  \n",
        "with open(\"test_summ.txt\",'w') as f:\n",
        "  f.write(\"\\n\".join(test_summ))  \n",
        "  \"\"\""
      ],
      "execution_count": 4,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "'\\nfrom google_drive_downloader import GoogleDriveDownloader as gdd\\n\\ngdd.download_file_from_google_drive(file_id=\\'0BwmD_VLjROrfTHk4NFg2SndKcjQ\\',\\n                                    dest_path=\\'./cnn_stories.tgz\\',\\n                                    unzip=True)\\n!tar -zxf cnn_stories.tgz\\n\\nfrom os import listdir\\nimport string\\n\\n# load doc into memory\\ndef load_doc(filename):\\n\\t# open the file as read only\\n\\tfile = open(filename, encoding=\\'utf-8\\')\\n\\t# read all text\\n\\ttext = file.read()\\n\\t# close the file\\n\\tfile.close()\\n\\treturn text\\n\\n# split a document into news story and highlights\\ndef split_story(doc):\\n\\t# find first highlight\\n\\tindex = doc.find(\\'@highlight\\')\\n\\t# split into story and highlights\\n\\tstory, highlights = doc[:index], doc[index:].split(\\'@highlight\\')\\n\\t# strip extra white space around each highlight\\n\\thighlights = [h.strip() for h in highlights if len(h) > 0]\\n\\treturn story, highlights\\n\\n# load all stories in a directory\\ndef load_stories(directory):\\n\\tstories = list()\\n\\tfor name in listdir(directory):\\n\\t\\tfilename = directory + \\'/\\' + name\\n\\t\\t# load document\\n\\t\\tdoc = load_doc(filename)\\n\\t\\t# split into story and highlights\\n\\t\\tstory, highlights = split_story(doc)\\n\\t\\t# store\\n\\t\\tstories.append({\\'story\\':story, \\'highlights\\':highlights})\\n\\treturn stories\\n\\n# clean a list of lines\\ndef clean_lines(lines):\\n\\tcleaned = list()\\n\\t# prepare a translation table to remove punctuation\\n\\ttable = str.maketrans(\\'\\', \\'\\', string.punctuation)\\n\\tfor line in lines:\\n\\t\\t# strip source cnn office if it exists\\n\\t\\tindex = line.find(\\'(CNN) -- \\')\\n\\t\\tif index > -1:\\n\\t\\t\\tline = line[index+len(\\'(CNN)\\'):]\\n\\t\\t# tokenize on white space\\n\\t\\tline = line.split()\\n\\t\\t# convert to lower case\\n\\t\\tline = [word.lower() for word in line]\\n\\t\\t# remove punctuation from each 
token\\n\\t\\tline = [w.translate(table) for w in line]\\n\\t\\t# remove tokens with numbers in them\\n\\t\\tline = [word for word in line if word.isalpha()]\\n\\t\\t# store as string\\n\\t\\tcleaned.append(\\' \\'.join(line))\\n\\t# remove empty strings\\n\\tcleaned = [c for c in cleaned if len(c) > 0]\\n\\treturn cleaned\\n\\n# load stories\\ndirectory = \\'cnn/stories/\\'\\nstories = load_stories(directory)\\nprint(\\'Loaded Stories %d\\' % len(stories))\\n\\n# clean stories\\nf1 = open(\"stories.txt\",\\'w\\')\\nf2 = open(\"summary.txt\",\\'w\\')\\nfor example in stories:\\n  example[\\'story\\'] = clean_lines(example[\\'story\\'].split(\\'\\n\\'))\\n  example[\\'highlights\\'] = clean_lines(example[\\'highlights\\'])\\n  f1.write(\" \".join(example[\\'story\\']))\\n  f1.write(\"\\n\")\\n  f2.write(\" \".join(example[\\'highlights\\']))\\n  f2.write(\"\\n\")\\nf1.close()\\nf2.close()\\n  \\nstory = open(\"stories.txt\").readlines()\\nsumm = open(\"summary.txt\").readlines() \\ntrain_story = story[0:90000]\\ntrain_summ = summ[0:90000]\\n\\neval_story = story[90000:91579]\\neval_summ = summ[90000:91579]\\n\\n\\ntest_story = story[91579:92579]\\ntest_summ = summ[91579:92579]\\n\\n\\nwith open(\"train_story.txt\",\\'w\\') as f:\\n  f.write(\"\\n\".join(train_story))\\n  \\nwith open(\"train_summ.txt\",\\'w\\') as f:\\n  f.write(\"\\n\".join(train_summ))\\n  \\nwith open(\"eval_story.txt\",\\'w\\') as f:\\n  f.write(\"\\n\".join(eval_story))\\n  \\n  \\nwith open(\"eval_summ.txt\",\\'w\\') as f:\\n  f.write(\"\\n\".join(eval_summ))\\n  \\n  \\nwith open(\"test_story.txt\",\\'w\\') as f:\\n  f.write(\"\\n\".join(test_story))\\n  \\n  \\nwith open(\"test_summ.txt\",\\'w\\') as f:\\n  f.write(\"\\n\".join(test_summ))  \\n  '"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 4
        }
      ]
    },
    {
      "metadata": {
        "id": "SsWJmIfmij-_",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "import os\n",
        "import csv\n",
        "import collections\n",
        "import sys\n",
        "from texar_repo.examples.bert.utils import data_utils, model_utils, tokenization\n",
        "import importlib\n",
        "import tensorflow as tf\n",
        "import texar as tx \n",
        "from texar_repo.examples.bert import config_classifier as config_downstream\n",
        "from texar_repo.texar.utils import transformer_utils\n",
        "from texar_repo.examples.transformer.utils import data_utils, utils\n",
        "from texar_repo.examples.transformer.bleu_tool import bleu_wrapper\n"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "Ko2McfcdhbcN",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "#config\n",
        "\n",
        "dcoder_config = {\n",
        "    'dim': 768,\n",
        "    'num_blocks': 6,\n",
        "    'multihead_attention': {\n",
        "        'num_heads': 8,\n",
        "        'output_dim': 768\n",
        "        # See documentation for more optional hyperparameters\n",
        "    },\n",
        "    'position_embedder_hparams': {\n",
        "        'dim': 768\n",
        "    },\n",
        "    'initializer': {\n",
        "        'type': 'variance_scaling_initializer',\n",
        "        'kwargs': {\n",
        "            'scale': 1.0,\n",
        "            'mode': 'fan_avg',\n",
        "            'distribution': 'uniform',\n",
        "        },\n",
        "    },\n",
        "    'poswise_feedforward': tx.modules.default_transformer_poswise_net_hparams(\n",
        "        output_dim=768)\n",
        "}\n",
        "\n",
        "loss_label_confidence = 0.9\n",
        "\n",
        "random_seed = 1234\n",
        "beam_width = 5\n",
        "alpha = 0.6\n",
        "hidden_dim = 768\n",
        "\n",
        "\n",
        "opt = {\n",
        "    'optimizer': {\n",
        "        'type': 'AdamOptimizer',\n",
        "        'kwargs': {\n",
        "            'beta1': 0.9,\n",
        "            'beta2': 0.997,\n",
        "            'epsilon': 1e-9\n",
        "        }\n",
        "    }\n",
        "}\n",
        "\n",
        "\n",
        "lr = {\n",
        "    'learning_rate_schedule': 'constant.linear_warmup.rsqrt_decay.rsqrt_depth',\n",
        "    'lr_constant': 2 * (hidden_dim ** -0.5),\n",
        "    'static_lr': 1e-3,\n",
        "    'warmup_steps': 2000,\n",
        "}\n",
        "\n",
        "bos_token_id =101\n",
        "eos_token_id = 102\n",
        "\n",
        "model_dir= \"./models\"\n",
        "run_mode= \"train_and_evaluate\"\n",
        "batch_size = 32\n",
        "test_batch_size = 32\n",
        "\n",
        "max_train_epoch = 20\n",
        "display_steps = 100\n",
        "eval_steps = 100000\n",
        "\n",
        "max_decoding_length = 400\n",
        "\n",
        "max_seq_length_src = 512\n",
        "max_seq_length_tgt = 400\n",
        "\n",
        "bert_pretrain_dir = 'bert_pretrained_models/uncased_L-12_H-768_A-12'\n",
        "#config"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "MrBw61rEiXeE",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "!mkdir bert_pretrained_models"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "FH2RX773i40g",
        "colab_type": "code",
        "outputId": "3e61c01b-e31b-485b-89e0-b51fddc341d4",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 354
        }
      },
      "cell_type": "code",
      "source": [
        "!wget https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-12_H-768_A-12.zip -P bert_pretrained_models/;\n",
        "!unzip bert_pretrained_models/uncased_L-12_H-768_A-12.zip -d bert_pretrained_models/\n"
      ],
      "execution_count": 11,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "--2019-01-31 01:48:29--  https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-12_H-768_A-12.zip\n",
            "Resolving storage.googleapis.com (storage.googleapis.com)... 64.233.181.128, 2607:f8b0:4001:c0f::80\n",
            "Connecting to storage.googleapis.com (storage.googleapis.com)|64.233.181.128|:443... connected.\n",
            "HTTP request sent, awaiting response... 200 OK\n",
            "Length: 407727028 (389M) [application/zip]\n",
            "Saving to: ‘bert_pretrained_models/uncased_L-12_H-768_A-12.zip’\n",
            "\n",
            "uncased_L-12_H-768_ 100%[===================>] 388.84M   136MB/s    in 2.9s    \n",
            "\n",
            "2019-01-31 01:48:32 (136 MB/s) - ‘bert_pretrained_models/uncased_L-12_H-768_A-12.zip’ saved [407727028/407727028]\n",
            "\n",
            "Archive:  bert_pretrained_models/uncased_L-12_H-768_A-12.zip\n",
            "   creating: bert_pretrained_models/uncased_L-12_H-768_A-12/\n",
            "  inflating: bert_pretrained_models/uncased_L-12_H-768_A-12/bert_model.ckpt.meta  \n",
            "  inflating: bert_pretrained_models/uncased_L-12_H-768_A-12/bert_model.ckpt.data-00000-of-00001  \n",
            "  inflating: bert_pretrained_models/uncased_L-12_H-768_A-12/vocab.txt  \n",
            "  inflating: bert_pretrained_models/uncased_L-12_H-768_A-12/bert_model.ckpt.index  \n",
            "  inflating: bert_pretrained_models/uncased_L-12_H-768_A-12/bert_config.json  \n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "metadata": {
        "id": "JC60vOxGjI-M",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "class InputExample():\n",
        "    \"\"\"A single training/test example holding a source text and an optional target text.\"\"\"\n",
        "\n",
        "    def __init__(self, guid, text_a, text_b=None):\n",
        "        \"\"\"Constructs an InputExample.\n",
        "        Args:\n",
        "            guid: Unique id for the example.\n",
        "            text_a: string. The untokenized text of the first sequence.\n",
        "                For single sequence tasks, only this sequence must be specified.\n",
        "            text_b: (Optional) string. The untokenized text of the second\n",
        "                sequence, stored as the target text. Must be specified only\n",
        "                for sequence pair tasks.\n",
        "        \"\"\"\n",
        "        self.guid = guid\n",
        "        self.src_txt = text_a\n",
        "        self.tgt_txt = text_b\n",
        "        \n",
        "class InputFeatures():\n",
        "    \"\"\"A single set of features of data.\"\"\"\n",
        "\n",
        "    def __init__(self, src_input_ids,src_input_mask,src_segment_ids,tgt_input_ids,tgt_input_mask,tgt_labels):\n",
        "        self.src_input_ids = src_input_ids\n",
        "        self.src_input_mask = src_input_mask\n",
        "        self.src_segment_ids = src_segment_ids\n",
        "        self.tgt_input_ids = tgt_input_ids\n",
        "        self.tgt_input_mask = tgt_input_mask \n",
        "        self.tgt_labels = tgt_labels\n",
        "        \n",
        "       \n",
        "class DataProcessor(object):\n",
        "    \"\"\"Base class for data converters for sequence classification data sets.\"\"\"\n",
        "\n",
        "    def get_train_examples(self, data_dir):\n",
        "        \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\"\n",
        "        raise NotImplementedError()\n",
        "\n",
        "    def get_dev_examples(self, data_dir):\n",
        "        \"\"\"Gets a collection of `InputExample`s for the dev set.\"\"\"\n",
        "        raise NotImplementedError()\n",
        "\n",
        "    def get_test_examples(self, data_dir):\n",
        "        \"\"\"Gets a collection of `InputExample`s for prediction.\"\"\"\n",
        "        raise NotImplementedError()\n",
        "\n",
        "    def get_labels(self):\n",
        "        \"\"\"Gets the list of labels for this data set.\"\"\"\n",
        "        raise NotImplementedError()\n",
        "\n",
        "    @classmethod\n",
        "    def _read_tsv(cls, input_file, quotechar=None):\n",
        "        \"\"\"Reads a tab separated value file.\"\"\"\n",
        "        with tf.gfile.Open(input_file, \"r\") as f:\n",
        "            reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n",
        "            lines = []\n",
        "            i = 0\n",
        "            for line in reader:\n",
        "                lines.append(line)\n",
        "        return lines\n",
        "\n",
        "\n",
        "    @classmethod\n",
        "    def _read_file(cls, input_file, quotechar=None):\n",
        "        \"\"\"Reads a newline separated text file.\"\"\"\n",
        "        with tf.gfile.Open(input_file, \"r\") as f:\n",
        "            reader = csv.reader(f, delimiter=\"\\n\", quotechar=quotechar)\n",
        "            lines = []\n",
        "            i = 0\n",
        "            for line in reader:\n",
        "                lines.append(line)\n",
        "        return lines\n",
        "      \n",
        "      \n",
        "class CNNDailymail(DataProcessor):\n",
        "    \"\"\"Processor for the CNN/DailyMail summarization data set.\"\"\"\n",
        "\n",
        "    def get_train_examples(self, data_dir):\n",
        "        \"\"\"See base class.\"\"\"\n",
        "        return self._create_examples(\n",
        "            self._read_file(os.path.join(data_dir, \"train_story.txt\")),self._read_file(os.path.join(data_dir, \"train_summ.txt\")),\n",
        "            \"train\")\n",
        "\n",
        "    def get_dev_examples(self, data_dir):\n",
        "        \"\"\"See base class.\"\"\"\n",
        "        return self._create_examples(\n",
        "            self._read_file(os.path.join(data_dir, \"eval_story.txt\")),self._read_file(os.path.join(data_dir, \"eval_summ.txt\")),\n",
        "            \"dev\")\n",
        "\n",
        "    def get_test_examples(self, data_dir):\n",
        "        \"\"\"See base class.\"\"\"\n",
        "        return self._create_examples(\n",
        "            self._read_file(os.path.join(data_dir, \"test_story.txt\")),self._read_file(os.path.join(data_dir, \"test_summ.txt\")),\n",
        "            \"test\")\n",
        "\n",
        "    def _create_examples(self, src_lines,tgt_lines,set_type):\n",
        "        examples = [] \n",
        "        for i,data in enumerate(zip(src_lines,tgt_lines)):\n",
        "            guid = \"%s-%s\" % (set_type, i)\n",
        "            if set_type == \"test\" and i == 0:\n",
        "                continue\n",
        "            else:\n",
        "                #print(data)\n",
        "                if len(data[0])==0 or len(data[1])==0:\n",
        "                  continue\n",
        "                src_lines = tokenization.convert_to_unicode(data[0][0])\n",
        "                tgt_lines = tokenization.convert_to_unicode(data[1][0])\n",
        "                examples.append(InputExample(guid=guid, text_a=src_lines,\n",
        "                                         text_b=tgt_lines))\n",
        "        return examples\n",
        "  \n",
        "  \n",
        "def file_based_convert_examples_to_features(\n",
        "        examples, max_seq_length_src,max_seq_length_tgt,tokenizer, output_file):\n",
        "    \"\"\"Convert a set of `InputExample`s to a TFRecord file.\"\"\"\n",
        "\n",
        "    writer = tf.python_io.TFRecordWriter(output_file)\n",
        "\n",
        "    for (ex_index, example) in enumerate(examples):\n",
        "        #print(\"ex_index\",ex_index)\n",
        "\n",
        "        if (ex_index+1) %1000 == 0 :\n",
        "          print(\"------------processed..{}...examples\".format(ex_index))\n",
        "          \n",
        "        feature = convert_single_example(ex_index, example,\n",
        "                                         max_seq_length_src,max_seq_length_tgt,tokenizer)\n",
        "\n",
        "        def create_int_feature(values):\n",
        "            return tf.train.Feature(\n",
        "                int64_list=tf.train.Int64List(value=list(values)))\n",
        "\n",
        "        features = collections.OrderedDict()\n",
        "        features[\"src_input_ids\"] = create_int_feature(feature.src_input_ids)\n",
        "        features[\"src_input_mask\"] = create_int_feature(feature.src_input_mask)\n",
        "        features[\"src_segment_ids\"] = create_int_feature(feature.src_segment_ids)\n",
        "\n",
        "        features[\"tgt_input_ids\"] = create_int_feature(feature.tgt_input_ids)\n",
        "        features[\"tgt_input_mask\"] = create_int_feature(feature.tgt_input_mask)\n",
        "        features['tgt_labels'] = create_int_feature(feature.tgt_labels)\n",
        "        \n",
        "        \n",
        "        \n",
        "        #print(feature.tgt_labels)\n",
        "        \n",
        "\n",
        "        tf_example = tf.train.Example(\n",
        "            features=tf.train.Features(feature=features))\n",
        "        writer.write(tf_example.SerializeToString())\n",
        "\n",
        "\n",
        "def convert_single_example(ex_index, example, max_seq_length_src,max_seq_length_tgt,\n",
        "                           tokenizer):\n",
        "    \"\"\"Converts a single `InputExample` into a single `InputFeatures`.\"\"\"\n",
        "    \"\"\"\n",
        "    label_map = {}\n",
        "    for (i, label) in enumerate(label_list):\n",
        "        label_map[label] = i\n",
        "    \"\"\"\n",
        "    tokens_a = tokenizer.tokenize(example.src_txt)\n",
        "    tokens_b = tokenizer.tokenize(example.tgt_txt)\n",
        "\n",
        "\n",
        "    # Truncates `tokens_a` and `tokens_b` so that each, plus its\n",
        "    # special tokens, fits within its max length.\n",
        "    # Account for [CLS] and [SEP] with \"- 2\"\n",
        "    if len(tokens_a) > max_seq_length_src - 2:\n",
        "            tokens_a = tokens_a[0:(max_seq_length_src - 2)]\n",
        "    \n",
        "    if len(tokens_b) > max_seq_length_tgt - 2:\n",
        "            tokens_b = tokens_b[0:(max_seq_length_tgt - 2)]\n",
        "\n",
        "    \n",
        "    tokens_src = []\n",
        "    segment_ids_src = []\n",
        "    tokens_src.append(\"[CLS]\")\n",
        "    segment_ids_src.append(0)\n",
        "    for token in tokens_a:\n",
        "        tokens_src.append(token)\n",
        "        segment_ids_src.append(0)\n",
        "    tokens_src.append(\"[SEP]\")\n",
        "    segment_ids_src.append(0)\n",
        "  \n",
        "\n",
        "    tokens_tgt = []\n",
        "    segment_ids_tgt = []\n",
        "    tokens_tgt.append(\"[CLS]\")\n",
        "    #segment_ids_tgt.append(0)\n",
        "    for token in tokens_b:\n",
        "        tokens_tgt.append(token)\n",
        "        #segment_ids_tgt.append(0)\n",
        "    tokens_tgt.append(\"[SEP]\")\n",
        "    #segment_ids_tgt.append(0)\n",
        "\n",
        "    input_ids_src = tokenizer.convert_tokens_to_ids(tokens_src)\n",
        "   \n",
        "    \n",
        "\n",
        "    input_ids_tgt = tokenizer.convert_tokens_to_ids(tokens_tgt)\n",
        "    \n",
        "    # Drop the trailing [SEP]: decoder inputs start at [CLS]; labels below are these ids shifted left by one\n",
        "    input_ids_tgt = input_ids_tgt[:-1] \n",
        "    \n",
        "    input_mask_src = [1] * len(input_ids_src)\n",
        "\n",
        "\n",
        "    input_mask_tgt = [1] * len(input_ids_tgt)\n",
        "    \n",
        "    labels_tgt = input_ids_tgt[1:]\n",
        "    \n",
        "    \n",
        "    labels_tgt.append(0)\n",
        "    \n",
        "    #print(len(input_ids_tgt))\n",
        "    #print(len(input_mask_tgt))\n",
        "    #print(len(labels_tgt))\n",
        "    #print(len(segment_ids_tgt))\n",
        "    \n",
        "    while len(input_ids_src) < max_seq_length_src:\n",
        "        input_ids_src.append(0)\n",
        "        input_mask_src.append(0)\n",
        "        segment_ids_src.append(0)\n",
        "\n",
        "    while len(input_ids_tgt) < max_seq_length_tgt:\n",
        "        input_ids_tgt.append(0)\n",
        "        input_mask_tgt.append(0)\n",
        "        segment_ids_tgt.append(0)\n",
        "        labels_tgt.append(0)\n",
        "\n",
        "    feature = InputFeatures( src_input_ids=input_ids_src,src_input_mask=input_mask_src,src_segment_ids=segment_ids_src,\n",
        "        tgt_input_ids=input_ids_tgt,tgt_input_mask=input_mask_tgt,tgt_labels=labels_tgt)\n",
        "\n",
        "    \n",
        "    return feature\n",
        "\n",
        "\n",
        "def file_based_input_fn_builder(input_file, max_seq_length_src,max_seq_length_tgt, is_training,\n",
        "                                drop_remainder, is_distributed=False):\n",
        "    \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n",
        "\n",
        "    name_to_features = {\n",
        "        \"src_input_ids\": tf.FixedLenFeature([max_seq_length_src], tf.int64),\n",
        "        \"src_input_mask\": tf.FixedLenFeature([max_seq_length_src], tf.int64),\n",
        "        \"src_segment_ids\": tf.FixedLenFeature([max_seq_length_src], tf.int64),\n",
        "        \"tgt_input_ids\": tf.FixedLenFeature([max_seq_length_tgt], tf.int64),\n",
        "        \"tgt_input_mask\": tf.FixedLenFeature([max_seq_length_tgt], tf.int64),\n",
        "        \"tgt_labels\" : tf.FixedLenFeature([max_seq_length_tgt], tf.int64),\n",
        "        \n",
        "        \n",
        "    }\n",
        "\n",
        "    def _decode_record(record, name_to_features):\n",
        "        \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n",
        "        example = tf.parse_single_example(record, name_to_features)\n",
        "        print(example)\n",
        "        print(example.keys())\n",
        "\n",
        "        # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n",
        "        # So cast all int64 to int32.\n",
        "        for name in list(example.keys()):\n",
        "            t = example[name]\n",
        "            if t.dtype == tf.int64:\n",
        "                t = tf.to_int32(t)\n",
        "            example[name] = t\n",
        "\n",
        "        return example\n",
        "\n",
        "    def input_fn(params):\n",
        "        \"\"\"The actual input function.\"\"\"\n",
        "        batch_size = params[\"batch_size\"]\n",
        "\n",
        "        # For training, we want a lot of parallel reading and shuffling.\n",
        "        # For eval, we want no shuffling and parallel reading doesn't matter.\n",
        "        d = tf.data.TFRecordDataset(input_file)\n",
        "        if is_training:\n",
        "\n",
        "            if is_distributed:\n",
        "                import horovod.tensorflow as hvd\n",
        "                tf.logging.info('distributed mode is enabled.'\n",
        "                                'size:{} rank:{}'.format(hvd.size(), hvd.rank()))\n",
        "                # https://github.com/uber/horovod/issues/223\n",
        "                d = d.shard(hvd.size(), hvd.rank())\n",
        "\n",
        "                d = d.repeat()\n",
        "                d = d.shuffle(buffer_size=100)\n",
        "                d = d.apply(\n",
        "                    tf.contrib.data.map_and_batch(\n",
        "                        lambda record: _decode_record(record, name_to_features),\n",
        "                        batch_size=batch_size//hvd.size(),\n",
        "                        drop_remainder=drop_remainder))\n",
        "            else:\n",
        "                tf.logging.info('distributed mode is not enabled.')\n",
        "                d = d.repeat()\n",
        "                d = d.shuffle(buffer_size=100)\n",
        "                d = d.apply(\n",
        "                    tf.contrib.data.map_and_batch(\n",
        "                        lambda record: _decode_record(record, name_to_features),\n",
        "                        batch_size=batch_size,\n",
        "                        drop_remainder=drop_remainder))\n",
        "\n",
        "        else:\n",
        "            d = d.apply(\n",
        "                tf.contrib.data.map_and_batch(\n",
        "                    lambda record: _decode_record(record, name_to_features),\n",
        "                    batch_size=batch_size,\n",
        "                    drop_remainder=drop_remainder))\n",
        "\n",
        "        return d\n",
        "    return input_fn\n",
        "  \n",
        "  \n",
        "def get_dataset(processor,\n",
        "                tokenizer,\n",
        "                data_dir,\n",
        "                max_seq_length_src,\n",
        "                max_seq_length_tgt,\n",
        "                batch_size,\n",
        "                mode,\n",
        "                output_dir,\n",
        "                is_distributed=False):\n",
        "    \"\"\"\n",
        "    Args:\n",
        "        processor: Data preprocessor; must have get_labels and\n",
        "            get_train/dev/test_examples methods defined.\n",
        "        tokenizer: The sentence tokenizer (e.g. BERT FullTokenizer).\n",
        "        data_dir: The input data directory.\n",
        "        max_seq_length_src: Max source sequence length.\n",
        "        max_seq_length_tgt: Max target sequence length.\n",
        "        batch_size: mini-batch size.\n",
        "        mode: `train`, `eval` or `test`.\n",
        "        output_dir: The directory to save the TFRecords in.\n",
        "    \"\"\"\n",
        "    #label_list = processor.get_labels()\n",
        "    if mode == 'train':\n",
        "        #train_examples = processor.get_train_examples(data_dir)\n",
        "        #train_file = os.path.join(output_dir, \"train.tf_record\")\n",
        "        train_file = \"gs://bert_summ/train.tf_record\"\n",
        "        #file_based_convert_examples_to_features(\n",
        "        #    train_examples, max_seq_length_src,max_seq_length_tgt,\n",
        "        #    tokenizer, train_file)\n",
        "        dataset = file_based_input_fn_builder(\n",
        "            input_file=train_file,\n",
        "            max_seq_length_src=max_seq_length_src,\n",
        "            max_seq_length_tgt =max_seq_length_tgt,\n",
        "            is_training=True,\n",
        "            drop_remainder=True,\n",
        "            is_distributed=is_distributed)({'batch_size': batch_size})\n",
        "    elif mode == 'eval':\n",
        "        #eval_examples = processor.get_dev_examples(data_dir)\n",
        "        #eval_file = os.path.join(output_dir, \"eval.tf_record\")\n",
        "        eval_file = \"gs://bert_summ/eval.tf_record\"\n",
        "        #file_based_convert_examples_to_features(\n",
        "        #    eval_examples, max_seq_length_src,max_seq_length_tgt,\n",
        "        #    tokenizer, eval_file)\n",
        "        dataset = file_based_input_fn_builder(\n",
        "            input_file=eval_file,\n",
        "            max_seq_length_src=max_seq_length_src,\n",
        "            max_seq_length_tgt =max_seq_length_tgt,\n",
        "            is_training=False,\n",
        "            drop_remainder=True,\n",
        "            is_distributed=is_distributed)({'batch_size': batch_size})\n",
        "    elif mode == 'test':\n",
        "      \n",
        "        #test_examples = processor.get_test_examples(data_dir)\n",
        "        #test_file = os.path.join(output_dir, \"predict.tf_record\")\n",
        "        test_file = \"gs://bert_summ/predict.tf_record\"\n",
        "        \n",
        "        #file_based_convert_examples_to_features(\n",
        "        #    test_examples, max_seq_length_src,max_seq_length_tgt,\n",
        "        #    tokenizer, test_file)\n",
        "        dataset = file_based_input_fn_builder(\n",
        "            input_file=test_file,\n",
        "            max_seq_length_src=max_seq_length_src,\n",
        "            max_seq_length_tgt =max_seq_length_tgt,\n",
        "            is_training=False,\n",
        "            drop_remainder=True,\n",
        "            is_distributed=is_distributed)({'batch_size': batch_size})\n",
        "    return dataset"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "Nn4vhTvJjT0D",
        "colab_type": "code",
        "outputId": "f4080019-27c1-4f01-e498-a2b1d7600beb",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 227
        }
      },
      "cell_type": "code",
      "source": [
        "bert_config = model_utils.transform_bert_to_texar_config(\n",
        "            os.path.join(bert_pretrain_dir, 'bert_config.json'))\n",
        "\n",
        "\n",
        "\n",
        "tokenizer = tokenization.FullTokenizer(\n",
        "        vocab_file=os.path.join(bert_pretrain_dir, 'vocab.txt'),\n",
        "        do_lower_case=True)\n",
        "\n",
        "vocab_size = len(tokenizer.vocab)\n",
        "\n",
        "processor = CNNDailymail()\n",
        "train_dataset = get_dataset(processor,tokenizer,\"./\",max_seq_length_src,max_seq_length_tgt,4,'train',\"./\")\n",
        "eval_dataset = get_dataset(processor,tokenizer,\"./\",max_seq_length_src,max_seq_length_tgt,4,'eval',\"./\")\n",
        "test_dataset = get_dataset(processor,tokenizer,\"./\",max_seq_length_src,max_seq_length_tgt,4,'test',\"./\")\n",
        "#del processor"
      ],
      "execution_count": 13,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "INFO:tensorflow:distributed mode is not enabled.\n",
            "WARNING:tensorflow:From <ipython-input-12-3918a62c9cd3>:297: map_and_batch (from tensorflow.contrib.data.python.ops.batching) is deprecated and will be removed in a future version.\n",
            "Instructions for updating:\n",
            "Use `tf.data.experimental.map_and_batch(...)`.\n",
            "{'src_input_ids': <tf.Tensor 'ParseSingleExample/ParseSingleExample:0' shape=(512,) dtype=int64>, 'src_input_mask': <tf.Tensor 'ParseSingleExample/ParseSingleExample:1' shape=(512,) dtype=int64>, 'src_segment_ids': <tf.Tensor 'ParseSingleExample/ParseSingleExample:2' shape=(512,) dtype=int64>, 'tgt_input_ids': <tf.Tensor 'ParseSingleExample/ParseSingleExample:3' shape=(400,) dtype=int64>, 'tgt_input_mask': <tf.Tensor 'ParseSingleExample/ParseSingleExample:4' shape=(400,) dtype=int64>, 'tgt_labels': <tf.Tensor 'ParseSingleExample/ParseSingleExample:5' shape=(400,) dtype=int64>}\n",
            "dict_keys(['src_input_ids', 'src_input_mask', 'src_segment_ids', 'tgt_input_ids', 'tgt_input_mask', 'tgt_labels'])\n",
            "{'src_input_ids': <tf.Tensor 'ParseSingleExample/ParseSingleExample:0' shape=(512,) dtype=int64>, 'src_input_mask': <tf.Tensor 'ParseSingleExample/ParseSingleExample:1' shape=(512,) dtype=int64>, 'src_segment_ids': <tf.Tensor 'ParseSingleExample/ParseSingleExample:2' shape=(512,) dtype=int64>, 'tgt_input_ids': <tf.Tensor 'ParseSingleExample/ParseSingleExample:3' shape=(400,) dtype=int64>, 'tgt_input_mask': <tf.Tensor 'ParseSingleExample/ParseSingleExample:4' shape=(400,) dtype=int64>, 'tgt_labels': <tf.Tensor 'ParseSingleExample/ParseSingleExample:5' shape=(400,) dtype=int64>}\n",
            "dict_keys(['src_input_ids', 'src_input_mask', 'src_segment_ids', 'tgt_input_ids', 'tgt_input_mask', 'tgt_labels'])\n",
            "{'src_input_ids': <tf.Tensor 'ParseSingleExample/ParseSingleExample:0' shape=(512,) dtype=int64>, 'src_input_mask': <tf.Tensor 'ParseSingleExample/ParseSingleExample:1' shape=(512,) dtype=int64>, 'src_segment_ids': <tf.Tensor 'ParseSingleExample/ParseSingleExample:2' shape=(512,) dtype=int64>, 'tgt_input_ids': <tf.Tensor 'ParseSingleExample/ParseSingleExample:3' shape=(400,) dtype=int64>, 'tgt_input_mask': <tf.Tensor 'ParseSingleExample/ParseSingleExample:4' shape=(400,) dtype=int64>, 'tgt_labels': <tf.Tensor 'ParseSingleExample/ParseSingleExample:5' shape=(400,) dtype=int64>}\n",
            "dict_keys(['src_input_ids', 'src_input_mask', 'src_segment_ids', 'tgt_input_ids', 'tgt_input_mask', 'tgt_labels'])\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "metadata": {
        "id": "kDhy0XGlIcY2",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "del processor"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "CBXxrBteAuVj",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 34
        },
        "outputId": "424fc4a8-d889-412f-ae34-766b961ce7ab"
      },
      "cell_type": "code",
      "source": [
        "vocab_size"
      ],
      "execution_count": 15,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "30522"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 15
        }
      ]
    },
    {
      "metadata": {
        "id": "lfw2JV11jsad",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "#inputs to the model\n",
        "src_input_ids = tf.placeholder(tf.int64, shape=(None, None))\n",
        "src_segment_ids = tf.placeholder(tf.int64, shape=(None, None))\n",
        "tgt_input_ids = tf.placeholder(tf.int64, shape=(None, None))\n",
        "tgt_segment_ids = tf.placeholder(tf.int64, shape=(None, None))\n",
        "\n",
        "batch_size = tf.shape(src_input_ids)[0]\n",
        "\n",
        "src_input_length = tf.reduce_sum(1 - tf.to_int32(tf.equal(src_input_ids, 0)),\n",
        "                             axis=1)\n",
        "tgt_input_length = tf.reduce_sum(1 - tf.to_int32(tf.equal(src_input_ids, 0)),\n",
        "                             axis=1)\n",
        "\n",
        "labels = tf.placeholder(tf.int64, shape=(None, None))\n",
        "is_target = tf.to_float(tf.not_equal(labels, 0))\n",
        "\n",
        "\n",
        "global_step = tf.Variable(0, dtype=tf.int64, trainable=False)\n",
        "learning_rate = tf.placeholder(tf.float64, shape=(), name='lr')"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "jTFde06_kACm",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "#create the iterator \n",
        "iterator = tx.data.FeedableDataIterator({\n",
        "        'train': train_dataset, 'eval': eval_dataset, 'test': test_dataset})\n",
        "\n",
        "batch = iterator.get_next()"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "XSrDO5YBkPYh",
        "colab_type": "code",
        "outputId": "a07623be-20ff-433c-a256-95f33b49b531",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 51
        }
      },
      "cell_type": "code",
      "source": [
        "#encoder Bert model\n",
        "print(\"Intializing the Bert Encoder Graph\")\n",
        "with tf.variable_scope('bert'):\n",
        "        embedder = tx.modules.WordEmbedder(\n",
        "            vocab_size=bert_config.vocab_size,\n",
        "            hparams=bert_config.embed)\n",
        "        word_embeds = embedder(src_input_ids)\n",
        "\n",
        "        # Creates segment embeddings for each type of tokens.\n",
        "        segment_embedder = tx.modules.WordEmbedder(\n",
        "            vocab_size=bert_config.type_vocab_size,\n",
        "            hparams=bert_config.segment_embed)\n",
        "        segment_embeds = segment_embedder(src_segment_ids)\n",
        "\n",
        "        input_embeds = word_embeds + segment_embeds\n",
        "\n",
        "        # The BERT model (a TransformerEncoder)\n",
        "        encoder = tx.modules.TransformerEncoder(hparams=bert_config.encoder)\n",
        "        encoder_output = encoder(input_embeds, src_input_length)\n",
        "        \n",
        "        # Builds layers for downstream classification, which is also initialized\n",
        "        # with BERT pre-trained checkpoint.\n",
        "        with tf.variable_scope(\"pooler\"):\n",
        "            # Uses the projection of the 1st-step hidden vector of BERT output\n",
        "            # as the representation of the sentence\n",
        "            bert_sent_hidden = tf.squeeze(encoder_output[:, 0:1, :], axis=1)\n",
        "            bert_sent_output = tf.layers.dense(\n",
        "                bert_sent_hidden, config_downstream.hidden_dim,\n",
        "                activation=tf.tanh)\n",
        "            output = tf.layers.dropout(\n",
        "                bert_sent_output, rate=0.1, training=tx.global_mode_train())\n",
        "\n",
        "\n",
        "print(\"loading the bert pretrained weights\")\n",
        "# Loads pretrained BERT model parameters\n",
        "init_checkpoint = os.path.join(bert_pretrain_dir, 'bert_model.ckpt')\n",
        "#init_checkpoint = \"gs://cloud-tpu-checkpoints/bert/uncased_L-12_H-768_A-12/bert_model.ckpt\"\n",
        "model_utils.init_bert_checkpoint(init_checkpoint)"
      ],
      "execution_count": 18,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Intializing the Bert Encoder Graph\n",
            "loading the bert pretrained weights\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "metadata": {
        "id": "C5m48bu5kVXm",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "#decoder part and mle loss\n",
        "tgt_embedding = tf.concat(\n",
        "    [tf.zeros(shape=[1, embedder.dim]), embedder.embedding[1:, :]], axis=0)\n",
        "\n",
        "decoder = tx.modules.TransformerDecoder(embedding=tgt_embedding,\n",
        "                             hparams=dcoder_config)\n",
        "# For training\n",
        "outputs = decoder(\n",
        "    memory=encoder_output,\n",
        "    memory_sequence_length=src_input_length,\n",
        "    inputs=embedder(tgt_input_ids),\n",
        "    sequence_length=tgt_input_length,\n",
        "    decoding_strategy='train_greedy',\n",
        "    mode=tf.estimator.ModeKeys.TRAIN\n",
        ")\n",
        "\n",
        "mle_loss = transformer_utils.smoothing_cross_entropy(\n",
        "        outputs.logits, labels, vocab_size, loss_label_confidence)\n",
        "mle_loss = tf.reduce_sum(mle_loss * is_target) / tf.reduce_sum(is_target)\n",
        "\n",
        "train_op = tx.core.get_train_op(\n",
        "        mle_loss,\n",
        "        learning_rate=learning_rate,\n",
        "        global_step=global_step,\n",
        "        hparams=opt)\n",
        "\n",
        "tf.summary.scalar('lr', learning_rate)\n",
        "tf.summary.scalar('mle_loss', mle_loss)\n",
        "summary_merged = tf.summary.merge_all()"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "sfDuR-SVkdhF",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "#prediction \n",
        "start_tokens = tf.fill([tx.utils.get_batch_size(src_input_ids)],\n",
        "                       bos_token_id)\n",
        "predictions = decoder(\n",
        "    memory=encoder_output,\n",
        "    memory_sequence_length=src_input_length,\n",
        "    decoding_strategy='infer_greedy',\n",
        "    beam_width=beam_width,\n",
        "    alpha=alpha,\n",
        "    start_tokens=start_tokens,\n",
        "    end_token=eos_token_id,\n",
        "    max_decoding_length=400,\n",
        "    mode=tf.estimator.ModeKeys.PREDICT\n",
        ")\n",
        "if beam_width <= 1:\n",
        "    inferred_ids = predictions[0].sample_id\n",
        "else:\n",
        "    # Uses the best sample by beam search\n",
        "    inferred_ids = predictions['sample_id'][:, :, 0]\n",
        "\n",
        "\n",
        "saver = tf.train.Saver(max_to_keep=5)\n",
        "best_results = {'score': 0, 'epoch': -1}"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "TCmgMIV6kzO4",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "def _train_epoch(sess, epoch, step, smry_writer):\n",
        "        \n",
        "            \n",
        "        fetches = {\n",
        "            'step': global_step,\n",
        "            'train_op': train_op,\n",
        "            'smry': summary_merged,\n",
        "            'loss': mle_loss,\n",
        "        }\n",
        "\n",
        "        while True:\n",
        "            try:\n",
        "              feed_dict = {\n",
        "                iterator.handle: iterator.get_handle(sess, 'train'),\n",
        "                tx.global_mode(): tf.estimator.ModeKeys.TRAIN,\n",
        "              }\n",
        "              op = sess.run([batch],feed_dict)\n",
        "              feed_dict = {\n",
        "                   src_input_ids:op[0]['src_input_ids'],\n",
        "                   src_segment_ids : op[0]['src_segment_ids'],\n",
        "                   tgt_input_ids:op[0]['tgt_input_ids'],\n",
        "\n",
        "                   labels:op[0]['tgt_labels'],\n",
        "                   learning_rate: utils.get_lr(step, lr),\n",
        "                   tx.global_mode(): tf.estimator.ModeKeys.TRAIN\n",
        "                }\n",
        "\n",
        "\n",
        "              fetches_ = sess.run(fetches, feed_dict=feed_dict)\n",
        "              step, loss = fetches_['step'], fetches_['loss']\n",
        "              if step and step % display_steps == 0:\n",
        "                  logger.info('step: %d, loss: %.4f', step, loss)\n",
        "                  print('step: %d, loss: %.4f' % (step, loss))\n",
        "                  smry_writer.add_summary(fetches_['smry'], global_step=step)\n",
        "\n",
        "              if step and step % 1000 == 0:\n",
        "                  model_path = \"gs://bert_summ/models/model_\"+str(step)+\".ckpt\"\n",
        "                  logger.info('saving model to %s', model_path)\n",
        "                  print('saving model to %s' % model_path)\n",
        "                  saver.save(sess, model_path)\n",
        "              if step and step % eval_steps == 0:\n",
        "                  _eval_epoch(sess, epoch, mode='eval')\n",
        "            except tf.errors.OutOfRangeError:\n",
        "                break\n",
        "\n",
        "        return step"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "60_hbYdak5rd",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "def _eval_epoch(sess, epoch, mode):\n",
        "\n",
        "        references, hypotheses = [], []\n",
        "        bsize = test_batch_size\n",
        "        fetches = {\n",
        "                'inferred_ids': inferred_ids,\n",
        "            }\n",
        "        bno=0\n",
        "        while True:\n",
        "            \n",
        "            #print(\"Temp\",temp)\n",
        "            try:\n",
        "              print(\"Batch\",bno)\n",
        "              feed_dict = {\n",
        "              iterator.handle: iterator.get_handle(sess, 'eval'),\n",
        "              tx.global_mode(): tf.estimator.ModeKeys.EVAL,\n",
        "              }\n",
        "              op = sess.run([batch],feed_dict)\n",
        "              feed_dict = {\n",
        "                   src_input_ids:op[0]['src_input_ids'],\n",
        "                   src_segment_ids : op[0]['src_segment_ids'],\n",
        "                   tx.global_mode(): tf.estimator.ModeKeys.EVAL\n",
        "              }\n",
        "              fetches_ = sess.run(fetches, feed_dict=feed_dict)\n",
        "              labels = op[0]['tgt_labels']\n",
        "              hypotheses.extend(h.tolist() for h in fetches_['inferred_ids'])\n",
        "              references.extend(r.tolist() for r in labels)\n",
        "              hypotheses = utils.list_strip_eos(hypotheses, eos_token_id)\n",
        "              references = utils.list_strip_eos(references, eos_token_id)\n",
        "              bno = bno+1\n",
        "              \n",
        "            except tf.errors.OutOfRangeError:\n",
        "                break\n",
        "\n",
        "\n",
        "        if mode == 'eval':\n",
        "            # Writes results to files to evaluate BLEU\n",
        "            # For 'eval' mode, the BLEU is based on token ids (rather than\n",
        "            # text tokens) and serves only as a surrogate metric to monitor\n",
        "            # the training process\n",
        "            #fname = os.path.join(model_dir, 'tmp.eval')\n",
        "            fname = \"./tmp.eval\"\n",
        "            #fname = \"gs://bert_summ/models/tmp.eval\"\n",
        "            hypotheses = tx.utils.str_join(hypotheses)\n",
        "            references = tx.utils.str_join(references)\n",
        "            hyp_fn, ref_fn = tx.utils.write_paired_text(\n",
        "                hypotheses, references, fname, mode='s')\n",
        "            eval_bleu = bleu_wrapper(ref_fn, hyp_fn, case_sensitive=True)\n",
        "            eval_bleu = 100. * eval_bleu\n",
        "            logger.info('epoch: %d, eval_bleu %.4f', epoch, eval_bleu)\n",
        "            print('epoch: %d, eval_bleu %.4f' % (epoch, eval_bleu))\n",
        "\n",
        "            if eval_bleu > best_results['score']:\n",
        "                logger.info('epoch: %d, best bleu: %.4f', epoch, eval_bleu)\n",
        "                best_results['score'] = eval_bleu\n",
        "                best_results['epoch'] = epoch\n",
        "                #model_path = os.path.join(model_dir, 'best-model.ckpt')\n",
        "                model_path = \"gs://bert_summ/models/best-model.ckpt\"\n",
        "                logger.info('saving model to %s', model_path)\n",
        "                print('saving model to %s' % model_path)\n",
        "                saver.save(sess, model_path)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "v9b6ghCsnT90",
        "colab_type": "code",
        "outputId": "bc4a0e8b-e9ad-408c-a92f-c6b4455b2d03",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 357
        }
      },
      "cell_type": "code",
      "source": [
        "#tx.utils.maybe_create_dir(model_dir)\n",
        "#logging_file = os.path.join(model_dir, 'logging.txt')\n",
        "\n",
        "model_dir = \"gs://bert_summ/models/\"\n",
        "logging_file= \"logging.txt\"\n",
        "logger = utils.get_logger(logging_file)\n",
        "with tf.Session() as sess:\n",
        "    sess.run(tf.global_variables_initializer())\n",
        "    sess.run(tf.local_variables_initializer())\n",
        "    sess.run(tf.tables_initializer())\n",
        "\n",
        "    smry_writer = tf.summary.FileWriter(model_dir, graph=sess.graph)\n",
        "\n",
        "    if run_mode == 'train_and_evaluate':\n",
        "        logger.info('Begin running with train_and_evaluate mode')\n",
        "\n",
        "        if tf.train.latest_checkpoint(model_dir) is not None:\n",
        "            logger.info('Restore latest checkpoint in %s' % model_dir)\n",
        "            saver.restore(sess, tf.train.latest_checkpoint(model_dir))\n",
        "        \n",
        "        iterator.initialize_dataset(sess)\n",
        "\n",
        "        step = 5000\n",
        "        for epoch in range(max_train_epoch):\n",
        "          iterator.restart_dataset(sess, 'train')\n",
        "          step = _train_epoch(sess, epoch, step, smry_writer)\n",
        "\n",
        "    elif run_mode == 'test':\n",
        "        logger.info('Begin running with test mode')\n",
        "\n",
        "        logger.info('Restore latest checkpoint in %s' % model_dir)\n",
        "        saver.restore(sess, tf.train.latest_checkpoint(model_dir))\n",
        "\n",
        "        _eval_epoch(sess, 0, mode='test')\n",
        "\n",
        "    else:\n",
        "        raise ValueError('Unknown mode: {}'.format(run_mode))"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "INFO:tensorflow:Restoring parameters from gs://bert_summ/models/model_5000.ckpt\n",
            "step: 5100, loss: 7.0686\n",
            "step: 5200, loss: 6.7414\n",
            "step: 5300, loss: 6.4176\n",
            "step: 5400, loss: 6.9609\n",
            "step: 5500, loss: 7.0777\n",
            "step: 5600, loss: 6.8462\n",
            "step: 5700, loss: 6.8764\n",
            "step: 5800, loss: 7.2216\n",
            "step: 5900, loss: 6.6034\n",
            "step: 6000, loss: 6.8505\n",
            "saving model to gs://bert_summ/models/model_6000.ckpt\n",
            "step: 6100, loss: 6.7107\n",
            "step: 6200, loss: 6.9797\n",
            "step: 6300, loss: 7.2937\n",
            "step: 6400, loss: 6.9824\n",
            "step: 6500, loss: 7.0897\n",
            "step: 6600, loss: 5.2173\n",
            "step: 6700, loss: 7.3187\n",
            "step: 6800, loss: 7.0490\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "metadata": {
        "id": "L35eRRNKSoOV",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        ""
      ],
      "execution_count": 0,
      "outputs": []
    }
  ]
}

================================================
FILE: Inference.py
================================================
from flask import Flask,request,render_template
import requests 
import json
from collections import OrderedDict
import os
import numpy as np
import tensorflow as tf

app =Flask(__name__)

import sys

# Make the vendored Texar checkout importable before config/model/preprocess,
# which depend on it.
if not 'texar_repo' in sys.path:
  sys.path += ['texar_repo']

# Wildcard imports bring in the graph pieces referenced below — presumably the
# placeholders (src_input_ids, src_segment_ids), encoder_output, decoder,
# saver, model_dir, beam_width, alpha and the BOS/EOS token ids; verify
# against config.py / model.py.
from config import *
from model import *
from preprocess import *


# Inference-time decoding: one BOS start token per batch row.
start_tokens = tf.fill([tx.utils.get_batch_size(src_input_ids)],
                       bos_token_id)
predictions = decoder(
    memory=encoder_output,
    memory_sequence_length=src_input_length,
    decoding_strategy='infer_greedy',
    beam_width=beam_width,
    alpha=alpha,
    start_tokens=start_tokens,
    end_token=eos_token_id,
    max_decoding_length=400,
    mode=tf.estimator.ModeKeys.PREDICT
)
if beam_width <= 1:
    # Greedy decoding returns a (outputs, sequence_lengths) tuple.
    inferred_ids = predictions[0].sample_id
else:
    # Uses the best sample by beam search
    inferred_ids = predictions['sample_id'][:, :, 0]




# WordPiece tokenizer built from the pretrained BERT vocabulary file.
tokenizer = tokenization.FullTokenizer(
      vocab_file=os.path.join(bert_pretrain_dir, 'vocab.txt'),
      do_lower_case=True)


# Session is created at import time; variables are initialized and the
# checkpoint restored under the __main__ guard at the bottom of this file.
sess = tf.Session()
def infer_single_example(story, actual_summary, tokenizer):
    """Run the summarization model on one (story, summary) pair.

    Args:
        story: raw source text to summarize.
        actual_summary: reference summary; used only for the printed
            side-by-side comparison, not for decoding.
        tokenizer: BERT ``FullTokenizer`` used to map ids back to tokens.

    Returns:
        The generated summary as a single detokenized string.
    """
    example = {"src_txt": story,
               "tgt_txt": actual_summary}
    features = convert_single_example(1, example, max_seq_length_src,
                                      max_seq_length_tgt, tokenizer)

    # The feed placeholders expect [batch, seq_len] (that is how the training
    # loop feeds batches from the dataset).  reshape(1, -1) builds a
    # single-example batch; the previous reshape(-1, 1) transposed this into
    # [seq_len, 1] — seq_len one-token "sentences" — which corrupts inference.
    feed_dict = {
        src_input_ids: np.array(features.src_input_ids).reshape(1, -1),
        src_segment_ids: np.array(features.src_segment_ids).reshape(1, -1),
    }

    references, hypotheses = [], []
    fetches = {
        'inferred_ids': inferred_ids,
    }
    fetches_ = sess.run(fetches, feed_dict=feed_dict)
    labels = np.array(features.tgt_labels).reshape(1, -1)
    hypotheses.extend(h.tolist() for h in fetches_['inferred_ids'])
    references.extend(r.tolist() for r in labels)
    # Truncate every sequence at its first EOS token.  Both lists are handled
    # identically (the original stripped references[0] but then indexed
    # references[0] again, mixing a token list with a list of lists).
    hypotheses = utils.list_strip_eos(hypotheses, eos_token_id)
    references = utils.list_strip_eos(references, eos_token_id)
    hwords = tokenizer.convert_ids_to_tokens(hypotheses[0])
    rwords = tokenizer.convert_ids_to_tokens(references[0])

    # Re-join WordPiece fragments ("pre ##fix" -> "prefix").
    hwords = tx.utils.str_join(hwords).replace(" ##", "")
    rwords = tx.utils.str_join(rwords).replace(" ##", "")
    print("Original", rwords)
    print("Generated", hwords)
    return hwords

@app.route("/results", methods=["GET", "POST"])
def results():
    """Flask endpoint: read 'story' and 'summary' form fields and return
    the model-generated summary as the response body."""
    story_text = request.form['story']
    summary_text = request.form['summary']
    return infer_single_example(story_text, summary_text, tokenizer)


if __name__=="__main__":
    # Initialize all TF variables/tables, then overwrite the trainable ones
    # with the latest trained checkpoint before serving requests.
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())
    sess.run(tf.tables_initializer())
    saver.restore(sess, tf.train.latest_checkpoint(model_dir))
    # Listen on all interfaces; POST /results performs inference.
    app.run(host="0.0.0.0",port=1118,debug=False)
    




================================================
FILE: Readme.md
================================================
<h3>Abstractive summarization using bert as encoder and transformer decoder</h3>

I have used a text generation library called Texar. It's a beautiful library with a lot of abstractions — I would describe it as
scikit-learn for text generation problems.

The main idea behind this architecture is to leverage transfer learning from pretrained BERT, a masked language model.
I have replaced the encoder part with the BERT encoder, and the decoder is trained from scratch.

One of the advantages of using Transformer networks is that training is much faster than with LSTM-based models, as we eliminate the sequential behaviour of recurrent computation.

Transformer-based models generate more grammatically correct and coherent sentences.


<h3>To run the model</h3>
<pre>
wget https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-12_H-768_A-12.zip 
unzip uncased_L-12_H-768_A-12.zip

Place the story and summary files under data folder with the following names.
-train_story.txt
-train_summ.txt
-eval_story.txt
-eval_summ.txt
each story and summary must be in a single line (see sample text given.)


Step1:
Run Preprocessing
<b>python preprocess.py</b>

This creates two tfrecord files under the data folder.

Step 2:
<b>python main.py</b>

Configurations for the model can be changed in the config.py file.

Step 3:
Inference 
Run the command <b>python Inference.py</b>
This code runs a flask server 
Use postman to send the POST request @http://your_ip_address:1118/results
with two form parameters story,summary



</pre>




================================================
FILE: bnb_4bit_training.ipynb
================================================
{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "provenance": [],
      "gpuType": "T4",
      "include_colab_link": true
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "language_info": {
      "name": "python"
    },
    "accelerator": "GPU",
    "gpuClass": "standard",
    "widgets": {
      "application/vnd.jupyter.widget-state+json": {
        "4dda8029a1c54f9dac38834fc49d12b7": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "HBoxModel",
          "model_module_version": "1.5.0",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "HBoxModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "HBoxView",
            "box_style": "",
            "children": [
              "IPY_MODEL_e0e252da64054d6eb2f661b985aceab8",
              "IPY_MODEL_43a0c376ab8d40619c84e50c1fae1bf3",
              "IPY_MODEL_e7dc091b7bd54c439aaea379a15bdb6a"
            ],
            "layout": "IPY_MODEL_2fff8a5907fa44248e3b57dc4051236c"
          }
        },
        "e0e252da64054d6eb2f661b985aceab8": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "HTMLModel",
          "model_module_version": "1.5.0",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "HTMLModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "HTMLView",
            "description": "",
            "description_tooltip": null,
            "layout": "IPY_MODEL_73f8063f8b4c4c809ce4e410ccafee96",
            "placeholder": "​",
            "style": "IPY_MODEL_11566fa4205343cb9378f078b5b18f98",
            "value": "Downloading (…)okenizer_config.json: 100%"
          }
        },
        "43a0c376ab8d40619c84e50c1fae1bf3": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "FloatProgressModel",
          "model_module_version": "1.5.0",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "FloatProgressModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "ProgressView",
            "bar_style": "success",
            "description": "",
            "description_tooltip": null,
            "layout": "IPY_MODEL_18f17e571bb3482a9052b1b268abacde",
            "max": 156,
            "min": 0,
            "orientation": "horizontal",
            "style": "IPY_MODEL_96aa7b38a32a4f4db15ba6841c750fd8",
            "value": 156
          }
        },
        "e7dc091b7bd54c439aaea379a15bdb6a": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "HTMLModel",
          "model_module_version": "1.5.0",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "HTMLModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "HTMLView",
            "description": "",
            "description_tooltip": null,
            "layout": "IPY_MODEL_9bca368a376542d2b60594c47050470d",
            "placeholder": "​",
            "style": "IPY_MODEL_4480a062b90d4d13b12c3ecc0b832d0e",
            "value": " 156/156 [00:00&lt;00:00, 10.6kB/s]"
          }
        },
        "2fff8a5907fa44248e3b57dc4051236c": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "73f8063f8b4c4c809ce4e410ccafee96": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "11566fa4205343cb9378f078b5b18f98": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "DescriptionStyleModel",
          "model_module_version": "1.5.0",
          "state": {
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "DescriptionStyleModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "StyleView",
            "description_width": ""
          }
        },
        "18f17e571bb3482a9052b1b268abacde": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "96aa7b38a32a4f4db15ba6841c750fd8": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "ProgressStyleModel",
          "model_module_version": "1.5.0",
          "state": {
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "ProgressStyleModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "StyleView",
            "bar_color": null,
            "description_width": ""
          }
        },
        "9bca368a376542d2b60594c47050470d": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "4480a062b90d4d13b12c3ecc0b832d0e": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "DescriptionStyleModel",
          "model_module_version": "1.5.0",
          "state": {
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "DescriptionStyleModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "StyleView",
            "description_width": ""
          }
        },
        "3daf35d9c166402d94afcfd111b63807": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "HBoxModel",
          "model_module_version": "1.5.0",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "HBoxModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "HBoxView",
            "box_style": "",
            "children": [
              "IPY_MODEL_44400a34234341f7a182d99ef8657dc8",
              "IPY_MODEL_a7f997badd8d47729dac8cd0aed205dc",
              "IPY_MODEL_57166e4e5c024e1cacc63f2bbed51560"
            ],
            "layout": "IPY_MODEL_0d8925b6952e4c6583b262124f28febc"
          }
        },
        "44400a34234341f7a182d99ef8657dc8": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "HTMLModel",
          "model_module_version": "1.5.0",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "HTMLModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "HTMLView",
            "description": "",
            "description_tooltip": null,
            "layout": "IPY_MODEL_26d72ee7eff34d74986f596cf9c6a557",
            "placeholder": "​",
            "style": "IPY_MODEL_5f9382acf893491ab582a7282edff80d",
            "value": "Downloading (…)olve/main/vocab.json: 100%"
          }
        },
        "a7f997badd8d47729dac8cd0aed205dc": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "FloatProgressModel",
          "model_module_version": "1.5.0",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "FloatProgressModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "ProgressView",
            "bar_style": "success",
            "description": "",
            "description_tooltip": null,
            "layout": "IPY_MODEL_f7aff94ef61047cbaa55eeb098d205b5",
            "max": 1077392,
            "min": 0,
            "orientation": "horizontal",
            "style": "IPY_MODEL_45b8e4b9a2af4a72b08f0e2a3c78c63c",
            "value": 1077392
          }
        },
        "57166e4e5c024e1cacc63f2bbed51560": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "HTMLModel",
          "model_module_version": "1.5.0",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "HTMLModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "HTMLView",
            "description": "",
            "description_tooltip": null,
            "layout": "IPY_MODEL_2385294782954692a455fb65e9b59733",
            "placeholder": "​",
            "style": "IPY_MODEL_1371bd3069e541d3b5e40813bac2c490",
            "value": " 1.08M/1.08M [00:00&lt;00:00, 5.60MB/s]"
          }
        },
        "0d8925b6952e4c6583b262124f28febc": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "26d72ee7eff34d74986f596cf9c6a557": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "5f9382acf893491ab582a7282edff80d": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "DescriptionStyleModel",
          "model_module_version": "1.5.0",
          "state": {
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "DescriptionStyleModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "StyleView",
            "description_width": ""
          }
        },
        "f7aff94ef61047cbaa55eeb098d205b5": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "45b8e4b9a2af4a72b08f0e2a3c78c63c": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "ProgressStyleModel",
          "model_module_version": "1.5.0",
          "state": {
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "ProgressStyleModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "StyleView",
            "bar_color": null,
            "description_width": ""
          }
        },
        "2385294782954692a455fb65e9b59733": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "1371bd3069e541d3b5e40813bac2c490": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "DescriptionStyleModel",
          "model_module_version": "1.5.0",
          "state": {
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "DescriptionStyleModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "StyleView",
            "description_width": ""
          }
        },
        "083f6c3c60ad4370b6587761b49e5654": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "HBoxModel",
          "model_module_version": "1.5.0",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "HBoxModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "HBoxView",
            "box_style": "",
            "children": [
              "IPY_MODEL_8366012339cf4c1c8ef44ba3341df1b3",
              "IPY_MODEL_139901b773f141b196281de1c23f40df",
              "IPY_MODEL_7e0ae4a2ebe446b683e0f8be4a70dfd5"
            ],
            "layout": "IPY_MODEL_cc64b611043840ea9a6c1421d7327bb0"
          }
        },
        "8366012339cf4c1c8ef44ba3341df1b3": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "HTMLModel",
          "model_module_version": "1.5.0",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "HTMLModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "HTMLView",
            "description": "",
            "description_tooltip": null,
            "layout": "IPY_MODEL_d687111284984c0da14ff9f534b53c96",
            "placeholder": "​",
            "style": "IPY_MODEL_d7ae79cdb87146729acac1b5f2f70263",
            "value": "Downloading (…)olve/main/merges.txt: 100%"
          }
        },
        "139901b773f141b196281de1c23f40df": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "FloatProgressModel",
          "model_module_version": "1.5.0",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "FloatProgressModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "ProgressView",
            "bar_style": "success",
            "description": "",
            "description_tooltip": null,
            "layout": "IPY_MODEL_fe2e1e6d423c46c19ca6abc4bc397860",
            "max": 456583,
            "min": 0,
            "orientation": "horizontal",
            "style": "IPY_MODEL_2977c91d68ec48b09605472e2b46c62c",
            "value": 456583
          }
        },
        "7e0ae4a2ebe446b683e0f8be4a70dfd5": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "HTMLModel",
          "model_module_version": "1.5.0",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "HTMLModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "HTMLView",
            "description": "",
            "description_tooltip": null,
            "layout": "IPY_MODEL_3642f2a6cbd541408e3a88745f597f38",
            "placeholder": "​",
            "style": "IPY_MODEL_756359b628f74b4ebac5392e09a03e83",
            "value": " 457k/457k [00:00&lt;00:00, 7.25MB/s]"
          }
        },
        "cc64b611043840ea9a6c1421d7327bb0": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "d687111284984c0da14ff9f534b53c96": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "d7ae79cdb87146729acac1b5f2f70263": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "DescriptionStyleModel",
          "model_module_version": "1.5.0",
          "state": {
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "DescriptionStyleModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "StyleView",
            "description_width": ""
          }
        },
        "fe2e1e6d423c46c19ca6abc4bc397860": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "2977c91d68ec48b09605472e2b46c62c": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "ProgressStyleModel",
          "model_module_version": "1.5.0",
          "state": {
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "ProgressStyleModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "StyleView",
            "bar_color": null,
            "description_width": ""
          }
        },
        "3642f2a6cbd541408e3a88745f597f38": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "756359b628f74b4ebac5392e09a03e83": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "DescriptionStyleModel",
          "model_module_version": "1.5.0",
          "state": {
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "DescriptionStyleModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "StyleView",
            "description_width": ""
          }
        },
        "af26a845a28e47a98a42c2344b20430d": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "HBoxModel",
          "model_module_version": "1.5.0",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "HBoxModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "HBoxView",
            "box_style": "",
            "children": [
              "IPY_MODEL_559a87b3917a47738cd4d0172dc276cd",
              "IPY_MODEL_c41ee826c58a4542a435f41a8cf2ed1c",
              "IPY_MODEL_3e70a3c512a04c25a90217466f0b904f"
            ],
            "layout": "IPY_MODEL_b2e03d1e18a546a3bf7edc942a9ce2ee"
          }
        },
        "559a87b3917a47738cd4d0172dc276cd": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "HTMLModel",
          "model_module_version": "1.5.0",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "HTMLModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "HTMLView",
            "description": "",
            "description_tooltip": null,
            "layout": "IPY_MODEL_29fa4f908d07492387dc9f7607f55312",
            "placeholder": "​",
            "style": "IPY_MODEL_76dea45f667b40278e34647e6b6dbeb1",
            "value": "Downloading (…)/main/tokenizer.json: 100%"
          }
        },
        "c41ee826c58a4542a435f41a8cf2ed1c": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "FloatProgressModel",
          "model_module_version": "1.5.0",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "FloatProgressModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "ProgressView",
            "bar_style": "success",
            "description": "",
            "description_tooltip": null,
            "layout": "IPY_MODEL_56649cdda2034e0189aec800c6d8f4af",
            "max": 2113710,
            "min": 0,
            "orientation": "horizontal",
            "style": "IPY_MODEL_4fe01ea1a4c04d2fa9851b9e515aa79a",
            "value": 2113710
          }
        },
        "3e70a3c512a04c25a90217466f0b904f": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "HTMLModel",
          "model_module_version": "1.5.0",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "HTMLModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "HTMLView",
            "description": "",
            "description_tooltip": null,
            "layout": "IPY_MODEL_e1851b9cf0124fa3b7d876c40244c61c",
            "placeholder": "​",
            "style": "IPY_MODEL_9560bb9113724753a768d58ca0367046",
            "value": " 2.11M/2.11M [00:00&lt;00:00, 10.4MB/s]"
          }
        },
        "b2e03d1e18a546a3bf7edc942a9ce2ee": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "29fa4f908d07492387dc9f7607f55312": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "76dea45f667b40278e34647e6b6dbeb1": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "DescriptionStyleModel",
          "model_module_version": "1.5.0",
          "state": {
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "DescriptionStyleModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "StyleView",
            "description_width": ""
          }
        },
        "56649cdda2034e0189aec800c6d8f4af": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "4fe01ea1a4c04d2fa9851b9e515aa79a": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "ProgressStyleModel",
          "model_module_version": "1.5.0",
          "state": {
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "ProgressStyleModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "StyleView",
            "bar_color": null,
            "description_width": ""
          }
        },
        "e1851b9cf0124fa3b7d876c40244c61c": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "9560bb9113724753a768d58ca0367046": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "DescriptionStyleModel",
          "model_module_version": "1.5.0",
          "state": {
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "DescriptionStyleModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "StyleView",
            "description_width": ""
          }
        },
        "46403b1a813e4e9d96718ec9c85a5065": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "HBoxModel",
          "model_module_version": "1.5.0",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "HBoxModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "HBoxView",
            "box_style": "",
            "children": [
              "IPY_MODEL_de120e4ae5d7455390945a2df5791743",
              "IPY_MODEL_92f04cfa9cd04e34aa7ef73499f0c62b",
              "IPY_MODEL_cb4576abb0e4409aa4125a1eb3612cd7"
            ],
            "layout": "IPY_MODEL_8ffb76c7b6ba46698dbf1c7dedfce105"
          }
        },
        "de120e4ae5d7455390945a2df5791743": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "HTMLModel",
          "model_module_version": "1.5.0",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "HTMLModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "HTMLView",
            "description": "",
            "description_tooltip": null,
            "layout": "IPY_MODEL_974775d2cc954c9a83942ce57e263a95",
            "placeholder": "​",
            "style": "IPY_MODEL_7ebe4b354fc04b099ff8064700d034c4",
            "value": "Downloading (…)cial_tokens_map.json: 100%"
          }
        },
        "92f04cfa9cd04e34aa7ef73499f0c62b": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "FloatProgressModel",
          "model_module_version": "1.5.0",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "FloatProgressModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "ProgressView",
            "bar_style": "success",
            "description": "",
            "description_tooltip": null,
            "layout": "IPY_MODEL_171c629395f14252a0c9f0d52ddde7f6",
            "max": 90,
            "min": 0,
            "orientation": "horizontal",
            "style": "IPY_MODEL_6ffb160a906143fca91bd8a62736b966",
            "value": 90
          }
        },
        "cb4576abb0e4409aa4125a1eb3612cd7": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "HTMLModel",
          "model_module_version": "1.5.0",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "HTMLModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "HTMLView",
            "description": "",
            "description_tooltip": null,
            "layout": "IPY_MODEL_20c4ed70df98425b878c8bc0aeb935b9",
            "placeholder": "​",
            "style": "IPY_MODEL_cf87f95a7c514168a260c0302dd05dc9",
            "value": " 90.0/90.0 [00:00&lt;00:00, 1.96kB/s]"
          }
        },
        "8ffb76c7b6ba46698dbf1c7dedfce105": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "974775d2cc954c9a83942ce57e263a95": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "7ebe4b354fc04b099ff8064700d034c4": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "DescriptionStyleModel",
          "model_module_version": "1.5.0",
          "state": {
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "DescriptionStyleModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "StyleView",
            "description_width": ""
          }
        },
        "171c629395f14252a0c9f0d52ddde7f6": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "6ffb160a906143fca91bd8a62736b966": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "ProgressStyleModel",
          "model_module_version": "1.5.0",
          "state": {
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "ProgressStyleModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "StyleView",
            "bar_color": null,
            "description_width": ""
          }
        },
        "20c4ed70df98425b878c8bc0aeb935b9": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "cf87f95a7c514168a260c0302dd05dc9": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "DescriptionStyleModel",
          "model_module_version": "1.5.0",
          "state": {
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "DescriptionStyleModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "StyleView",
            "description_width": ""
          }
        },
        "3bcad19a79064a7dbbabaabb0f1c8a9f": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "HBoxModel",
          "model_module_version": "1.5.0",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "HBoxModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "HBoxView",
            "box_style": "",
            "children": [
              "IPY_MODEL_a4f10c97b95b4b19b2a9a02fbc60fa29",
              "IPY_MODEL_53fc8538b34c4c34809bbc7c75bfc029",
              "IPY_MODEL_f75760a7141245a0a8881e3d7476c877"
            ],
            "layout": "IPY_MODEL_87855b48d3aa41e7b38545aba53d71be"
          }
        },
        "a4f10c97b95b4b19b2a9a02fbc60fa29": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "HTMLModel",
          "model_module_version": "1.5.0",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "HTMLModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "HTMLView",
            "description": "",
            "description_tooltip": null,
            "layout": "IPY_MODEL_7ea2647caa7d4b14989e6a4f795b409b",
            "placeholder": "​",
            "style": "IPY_MODEL_a173bccd53974a248adb99dc1bd5b4d2",
            "value": "Downloading (…)lve/main/config.json: 100%"
          }
        },
        "53fc8538b34c4c34809bbc7c75bfc029": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "FloatProgressModel",
          "model_module_version": "1.5.0",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "FloatProgressModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "ProgressView",
            "bar_style": "success",
            "description": "",
            "description_tooltip": null,
            "layout": "IPY_MODEL_3f9845343c7f4ffea9f6640c7f0ec9ab",
            "max": 613,
            "min": 0,
            "orientation": "horizontal",
            "style": "IPY_MODEL_b3a0d237f6fe4b6dae958fef4765786d",
            "value": 613
          }
        },
        "f75760a7141245a0a8881e3d7476c877": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "HTMLModel",
          "model_module_version": "1.5.0",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "HTMLModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "HTMLView",
            "description": "",
            "description_tooltip": null,
            "layout": "IPY_MODEL_428c0f8165604415af070ee916ffa02a",
            "placeholder": "​",
            "style": "IPY_MODEL_b427fd7a2fdc49388e34c1e390c13260",
            "value": " 613/613 [00:00&lt;00:00, 19.2kB/s]"
          }
        },
        "87855b48d3aa41e7b38545aba53d71be": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "7ea2647caa7d4b14989e6a4f795b409b": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "a173bccd53974a248adb99dc1bd5b4d2": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "DescriptionStyleModel",
          "model_module_version": "1.5.0",
          "state": {
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "DescriptionStyleModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "StyleView",
            "description_width": ""
          }
        },
        "3f9845343c7f4ffea9f6640c7f0ec9ab": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "b3a0d237f6fe4b6dae958fef4765786d": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "ProgressStyleModel",
          "model_module_version": "1.5.0",
          "state": {
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "ProgressStyleModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "StyleView",
            "bar_color": null,
            "description_width": ""
          }
        },
        "428c0f8165604415af070ee916ffa02a": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "b427fd7a2fdc49388e34c1e390c13260": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "DescriptionStyleModel",
          "model_module_version": "1.5.0",
          "state": {
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "DescriptionStyleModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "StyleView",
            "description_width": ""
          }
        },
        "3527915a5473497b81ccbafafc6f7345": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "HBoxModel",
          "model_module_version": "1.5.0",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "HBoxModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "HBoxView",
            "box_style": "",
            "children": [
              "IPY_MODEL_f5c40d1c5de943d4bce6939895d0ebf5",
              "IPY_MODEL_72e94f7605c8452081f13051fe4eaba1",
              "IPY_MODEL_0c2a80f722f54658b6d25079377b68ff"
            ],
            "layout": "IPY_MODEL_85165564577b4e3ea86d8e4113425133"
          }
        },
        "f5c40d1c5de943d4bce6939895d0ebf5": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "HTMLModel",
          "model_module_version": "1.5.0",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "HTMLModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "HTMLView",
            "description": "",
            "description_tooltip": null,
            "layout": "IPY_MODEL_07a067b7fe0049088cd2049382701467",
            "placeholder": "​",
            "style": "IPY_MODEL_9e6e8895372e450e9a9c926bd9d66bfc",
            "value": "Downloading (…)model.bin.index.json: 100%"
          }
        },
        "72e94f7605c8452081f13051fe4eaba1": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "FloatProgressModel",
          "model_module_version": "1.5.0",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "FloatProgressModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "ProgressView",
            "bar_style": "success",
            "description": "",
            "description_tooltip": null,
            "layout": "IPY_MODEL_3dc35e22727744768f2c113f9a5358a9",
            "max": 57712,
            "min": 0,
            "orientation": "horizontal",
            "style": "IPY_MODEL_e97ff06782a24e8897c0f6b4acdc3579",
            "value": 57712
          }
        },
        "0c2a80f722f54658b6d25079377b68ff": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "HTMLModel",
          "model_module_version": "1.5.0",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "HTMLModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "HTMLView",
            "description": "",
            "description_tooltip": null,
            "layout": "IPY_MODEL_cc5082778cd743b7b494485050473841",
            "placeholder": "​",
            "style": "IPY_MODEL_97e06f7effa149d8afe85c153ede1d29",
            "value": " 57.7k/57.7k [00:00&lt;00:00, 675kB/s]"
          }
        },
        "85165564577b4e3ea86d8e4113425133": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "07a067b7fe0049088cd2049382701467": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "9e6e8895372e450e9a9c926bd9d66bfc": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "DescriptionStyleModel",
          "model_module_version": "1.5.0",
          "state": {
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "DescriptionStyleModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "StyleView",
            "description_width": ""
          }
        },
        "3dc35e22727744768f2c113f9a5358a9": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "e97ff06782a24e8897c0f6b4acdc3579": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "ProgressStyleModel",
          "model_module_version": "1.5.0",
          "state": {
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "ProgressStyleModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "StyleView",
            "bar_color": null,
            "description_width": ""
          }
        },
        "cc5082778cd743b7b494485050473841": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "97e06f7effa149d8afe85c153ede1d29": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "DescriptionStyleModel",
          "model_module_version": "1.5.0",
          "state": {
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "DescriptionStyleModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "StyleView",
            "description_width": ""
          }
        },
        "34283ecf8ccd4cf69c7292b120e2cb43": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "HBoxModel",
          "model_module_version": "1.5.0",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "HBoxModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "HBoxView",
            "box_style": "",
            "children": [
              "IPY_MODEL_685bfd23604147978b3bb8ea1d758107",
              "IPY_MODEL_9646be0b72a44f4ba6cfc68f2a522028",
              "IPY_MODEL_2e523299ecbb4ef3adaa95f8dd2ab072"
            ],
            "layout": "IPY_MODEL_b1d721eb56d243bb865e90b61c4a9785"
          }
        },
        "685bfd23604147978b3bb8ea1d758107": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "HTMLModel",
          "model_module_version": "1.5.0",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "HTMLModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "HTMLView",
            "description": "",
            "description_tooltip": null,
            "layout": "IPY_MODEL_df1a778a5d254fd997e6aa992fa3d537",
            "placeholder": "​",
            "style": "IPY_MODEL_28e540c117fb4766a344a5a1a887115e",
            "value": "Downloading shards: 100%"
          }
        },
        "9646be0b72a44f4ba6cfc68f2a522028": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "FloatProgressModel",
          "model_module_version": "1.5.0",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "FloatProgressModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "ProgressView",
            "bar_style": "success",
            "description": "",
            "description_tooltip": null,
            "layout": "IPY_MODEL_1cd4abd9506d4f329d7dcf58e7008ba7",
            "max": 46,
            "min": 0,
            "orientation": "horizontal",
            "style": "IPY_MODEL_6c5af425aaf44adea4e2027ba6062b9c",
            "value": 46
          }
        },
        "2e523299ecbb4ef3adaa95f8dd2ab072": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "HTMLModel",
          "model_module_version": "1.5.0",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "HTMLModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "HTMLView",
            "description": "",
            "description_tooltip": null,
            "layout": "IPY_MODEL_b9a91217395a4550a6012a06efd29e7c",
            "placeholder": "​",
            "style": "IPY_MODEL_f9c98867724e4dfda3b8918803c5bb1e",
            "value": " 46/46 [04:27&lt;00:00,  5.07s/it]"
          }
        },
        "b1d721eb56d243bb865e90b61c4a9785": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "df1a778a5d254fd997e6aa992fa3d537": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "28e540c117fb4766a344a5a1a887115e": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "DescriptionStyleModel",
          "model_module_version": "1.5.0",
          "state": {
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "DescriptionStyleModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "StyleView",
            "description_width": ""
          }
        },
        "1cd4abd9506d4f329d7dcf58e7008ba7": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "6c5af425aaf44adea4e2027ba6062b9c": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "ProgressStyleModel",
          "model_module_version": "1.5.0",
          "state": {
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "ProgressStyleModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "StyleView",
            "bar_color": null,
            "description_width": ""
          }
        },
        "b9a91217395a4550a6012a06efd29e7c": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "f9c98867724e4dfda3b8918803c5bb1e": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "DescriptionStyleModel",
          "model_module_version": "1.5.0",
          "state": {
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "DescriptionStyleModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "StyleView",
            "description_width": ""
          }
        },
        "2c3a591792794cd98a2c7317c6f8dc7b": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "HBoxModel",
          "model_module_version": "1.5.0",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "HBoxModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "HBoxView",
            "box_style": "",
            "children": [
              "IPY_MODEL_143568f550f34b1182a411497051308d",
              "IPY_MODEL_8e69d62d0dc14ce4b7b0269bf4e2eb78",
              "IPY_MODEL_5b22f2c813454e4787301c33dda5692f"
            ],
            "layout": "IPY_MODEL_02707d76344647ee9a5eb0e0205e2ff5"
          }
        },
        "143568f550f34b1182a411497051308d": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "HTMLModel",
          "model_module_version": "1.5.0",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "HTMLModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "HTMLView",
            "description": "",
            "description_tooltip": null,
            "layout": "IPY_MODEL_3f1781d7d05f4affbb0cf69a2d83cb76",
            "placeholder": "​",
            "style": "IPY_MODEL_5cc81942930d4d5fb039adaf004227c0",
            "value": "Downloading (…)l-00001-of-00046.bin: 100%"
          }
        },
        "8e69d62d0dc14ce4b7b0269bf4e2eb78": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "FloatProgressModel",
          "model_module_version": "1.5.0",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "FloatProgressModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "ProgressView",
            "bar_style": "success",
            "description": "",
            "description_tooltip": null,
            "layout": "IPY_MODEL_0c59932a3cdd4931a337d18558ac93f0",
            "max": 925994625,
            "min": 0,
            "orientation": "horizontal",
            "style": "IPY_MODEL_3a2bcd6c344b43b7bedddd4b56e833c8",
            "value": 925994625
          }
        },
        "5b22f2c813454e4787301c33dda5692f": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "HTMLModel",
          "model_module_version": "1.5.0",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "HTMLModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "HTMLView",
            "description": "",
            "description_tooltip": null,
            "layout": "IPY_MODEL_16a6b4938772480d9813c36caf450514",
            "placeholder": "​",
            "style": "IPY_MODEL_b757c51a4db04c3aaaf2f229d25ce2b6",
            "value": " 926M/926M [00:03&lt;00:00, 230MB/s]"
          }
        },
        "02707d76344647ee9a5eb0e0205e2ff5": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "3f1781d7d05f4affbb0cf69a2d83cb76": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "5cc81942930d4d5fb039adaf004227c0": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "DescriptionStyleModel",
          "model_module_version": "1.5.0",
          "state": {
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "DescriptionStyleModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "StyleView",
            "description_width": ""
          }
        },
        "0c59932a3cdd4931a337d18558ac93f0": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "3a2bcd6c344b43b7bedddd4b56e833c8": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "ProgressStyleModel",
          "model_module_version": "1.5.0",
          "state": {
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "ProgressStyleModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "StyleView",
            "bar_color": null,
            "description_width": ""
          }
        },
        "16a6b4938772480d9813c36caf450514": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "b757c51a4db04c3aaaf2f229d25ce2b6": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "DescriptionStyleModel",
          "model_module_version": "1.5.0",
          "state": {
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "DescriptionStyleModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "StyleView",
            "description_width": ""
          }
        },
        "56ec1bada613446ca99bf8a9c4ab3d69": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "HBoxModel",
          "model_module_version": "1.5.0",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "HBoxModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "HBoxView",
            "box_style": "",
            "children": [
              "IPY_MODEL_8619ac7034eb43d0ba5a05f80f080786",
              "IPY_MODEL_128e986f97fb4bdcb0c15b60499c35b2",
              "IPY_MODEL_9d3f40e1e44e46439fc8c1247704772e"
            ],
            "layout": "IPY_MODEL_f0f7a5aa0cf04bdfaedd20e83919bd50"
          }
        },
        "8619ac7034eb43d0ba5a05f80f080786": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "HTMLModel",
          "model_module_version": "1.5.0",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "HTMLModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "HTMLView",
            "description": "",
            "description_tooltip": null,
            "layout": "IPY_MODEL_220120f060f348b4ae9ff2beba7e5883",
            "placeholder": "​",
            "style": "IPY_MODEL_8559ddfaaaf140bd8b581cc7c3ed992b",
            "value": "Downloading (…)l-00002-of-00046.bin: 100%"
          }
        },
        "128e986f97fb4bdcb0c15b60499c35b2": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "FloatProgressModel",
          "model_module_version": "1.5.0",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "FloatProgressModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "ProgressView",
            "bar_style": "success",
            "description": "",
            "description_tooltip": null,
            "layout": "IPY_MODEL_ea5fb9415a3b40ae9f51354a1e2c37bb",
            "max": 910328184,
            "min": 0,
            "orientation": "horizontal",
            "style": "IPY_MODEL_e5127004f16d436cb7aa3f37fb4881f0",
            "value": 910328184
          }
        },
        "9d3f40e1e44e46439fc8c1247704772e": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "HTMLModel",
          "model_module_version": "1.5.0",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "HTMLModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "HTMLView",
            "description": "",
            "description_tooltip": null,
            "layout": "IPY_MODEL_eb322e7aa135490c9da4c0c56a77e087",
            "placeholder": "​",
            "style": "IPY_MODEL_d30f71b6100d4227af6fc1b21eacd5fe",
            "value": " 910M/910M [00:07&lt;00:00, 51.3MB/s]"
          }
        },
        "f0f7a5aa0cf04bdfaedd20e83919bd50": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "220120f060f348b4ae9ff2beba7e5883": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "8559ddfaaaf140bd8b581cc7c3ed992b": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "DescriptionStyleModel",
          "model_module_version": "1.5.0",
          "state": {
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "DescriptionStyleModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "StyleView",
            "description_width": ""
          }
        },
        "ea5fb9415a3b40ae9f51354a1e2c37bb": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "e5127004f16d436cb7aa3f37fb4881f0": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "ProgressStyleModel",
          "model_module_version": "1.5.0",
          "state": {
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "ProgressStyleModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "StyleView",
            "bar_color": null,
            "description_width": ""
          }
        },
        "eb322e7aa135490c9da4c0c56a77e087": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "d30f71b6100d4227af6fc1b21eacd5fe": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "DescriptionStyleModel",
          "model_module_version": "1.5.0",
          "state": {
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "DescriptionStyleModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "StyleView",
            "description_width": ""
          }
        },
        "d36a9cd4315147329650fad25e46d671": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "HBoxModel",
          "model_module_version": "1.5.0",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "HBoxModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "HBoxView",
            "box_style": "",
            "children": [
              "IPY_MODEL_55b6e0c2dbc3409e82e79427d639811f",
              "IPY_MODEL_174164ea94fa4188a5b2daa25533a006",
              "IPY_MODEL_243bcbec761c40c7906826ffdd9435fa"
            ],
            "layout": "IPY_MODEL_f734f41601204063aa733845a3b95d17"
          }
        },
        "55b6e0c2dbc3409e82e79427d639811f": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "HTMLModel",
          "model_module_version": "1.5.0",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "HTMLModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "HTMLView",
            "description": "",
            "description_tooltip": null,
            "layout": "IPY_MODEL_567ad6f6c2e3403285dd164db46659d0",
            "placeholder": "​",
            "style": "IPY_MODEL_cc1c1c1104644aa381ce702aa24105b9",
            "value": "Downloading (…)l-00003-of-00046.bin: 100%"
          }
        },
        "174164ea94fa4188a5b2daa25533a006": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "FloatProgressModel",
          "model_module_version": "1.5.0",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "FloatProgressModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "ProgressView",
            "bar_style": "success",
            "description": "",
            "description_tooltip": null,
            "layout": "IPY_MODEL_be1890dbda57429cb2738cea2183555c",
            "max": 910328184,
            "min": 0,
            "orientation": "horizontal",
            "style": "IPY_MODEL_678a8fc1687749209ea33c4a1026a23a",
            "value": 910328184
          }
        },
        "243bcbec761c40c7906826ffdd9435fa": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "HTMLModel",
          "model_module_version": "1.5.0",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "HTMLModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "HTMLView",
            "description": "",
            "description_tooltip": null,
            "layout": "IPY_MODEL_58db995285574441871ce58baedb5ab8",
            "placeholder": "​",
            "style": "IPY_MODEL_a5a438b2981a4b2d8405b9614f889193",
            "value": " 910M/910M [00:06&lt;00:00, 228MB/s]"
          }
        },
        "f734f41601204063aa733845a3b95d17": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "567ad6f6c2e3403285dd164db46659d0": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "cc1c1c1104644aa381ce702aa24105b9": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "DescriptionStyleModel",
          "model_module_version": "1.5.0",
          "state": {
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "DescriptionStyleModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "StyleView",
            "description_width": ""
          }
        },
        "be1890dbda57429cb2738cea2183555c": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "678a8fc1687749209ea33c4a1026a23a": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "ProgressStyleModel",
          "model_module_version": "1.5.0",
          "state": {
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "ProgressStyleModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "StyleView",
            "bar_color": null,
            "description_width": ""
          }
        },
        "58db995285574441871ce58baedb5ab8": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
 
Download .txt
gitextract_ydk6seuh/

├── .ipynb_checkpoints/
│   └── Untitled-checkpoint.ipynb
├── BERT_SUMM.ipynb
├── Inference.py
├── Readme.md
├── bnb_4bit_training.ipynb
├── config.py
├── data/
│   ├── eval.tf_record
│   ├── eval_story.txt
│   ├── eval_summ.txt
│   ├── train.tf_record
│   ├── train_story.txt
│   └── train_summ.txt
├── main.py
├── model.py
├── models/
│   └── logging.txt
├── preprocess.py
├── texar_repo/
│   ├── .gitignore
│   ├── .pylintrc
│   ├── .travis.yml
│   ├── CHANGELOG.md
│   ├── LICENSE
│   ├── README.md
│   ├── bin/
│   │   ├── average_checkpoints.py
│   │   ├── train.py
│   │   └── utils/
│   │       ├── README.md
│   │       ├── apply_bpe
│   │       ├── learn_bpe
│   │       ├── make_vocab.py
│   │       ├── multi-bleu.perl
│   │       ├── spm_decode
│   │       ├── spm_encode
│   │       └── spm_train
│   ├── config.py
│   ├── docs/
│   │   ├── Makefile
│   │   ├── _static/
│   │   │   └── css/
│   │   │       └── custom_theme.css
│   │   ├── code/
│   │   │   ├── agents.rst
│   │   │   ├── context.rst
│   │   │   ├── core.rst
│   │   │   ├── data.rst
│   │   │   ├── evals.rst
│   │   │   ├── hyperparams.rst
│   │   │   ├── losses.rst
│   │   │   ├── models.rst
│   │   │   ├── modules.rst
│   │   │   ├── run.rst
│   │   │   ├── txtgen.rst
│   │   │   └── utils.rst
│   │   ├── conf.py
│   │   ├── examples.md
│   │   ├── get_started.md
│   │   ├── index.rst
│   │   ├── make.bat
│   │   ├── requirements.txt
│   │   └── tutorials/
│   │       └── tutorial.rst
│   ├── examples/
│   │   ├── README.md
│   │   ├── bert/
│   │   │   ├── README.md
│   │   │   ├── bert_classifier_main.py
│   │   │   ├── bert_config_lib/
│   │   │   │   ├── README.md
│   │   │   │   ├── __init__.py
│   │   │   │   └── config_model_uncased_L-12_H-768_A-12.py
│   │   │   ├── config_classifier.py
│   │   │   ├── config_data_mrpc.py
│   │   │   ├── config_data_sst.py
│   │   │   └── utils/
│   │   │       ├── data_utils.py
│   │   │       ├── model_utils.py
│   │   │       └── tokenization.py
│   │   ├── distributed_gpu/
│   │   │   ├── README.md
│   │   │   ├── config_large.py
│   │   │   ├── config_medium.py
│   │   │   ├── config_small.py
│   │   │   ├── lm_ptb_distributed.py
│   │   │   └── ptb_reader.py
│   │   ├── hierarchical_dialog/
│   │   │   ├── README.md
│   │   │   ├── config_data.py
│   │   │   ├── config_model_biminor.py
│   │   │   ├── config_model_uniminor.py
│   │   │   ├── hred.py
│   │   │   └── sw_loader.py
│   │   ├── language_model_ptb/
│   │   │   ├── README.md
│   │   │   ├── config_large.py
│   │   │   ├── config_medium.py
│   │   │   ├── config_small.py
│   │   │   ├── lm_ptb.py
│   │   │   └── ptb_reader.py
│   │   ├── memory_network_lm/
│   │   │   ├── README.md
│   │   │   ├── config.py
│   │   │   ├── lm_ptb_memnet.py
│   │   │   └── ptb_reader.py
│   │   ├── rl_gym/
│   │   │   ├── README.md
│   │   │   ├── ac_cartpole.py
│   │   │   ├── config.py
│   │   │   ├── dqn_cartpole.py
│   │   │   └── pg_cartpole.py
│   │   ├── sentence_classifier/
│   │   │   ├── README.md
│   │   │   ├── clas_main.py
│   │   │   ├── config_kim.py
│   │   │   └── sst_data_preprocessor.py
│   │   ├── seq2seq_attn/
│   │   │   ├── README.md
│   │   │   ├── config_iwslt14.py
│   │   │   ├── config_model.py
│   │   │   ├── config_model_full.py
│   │   │   ├── config_toy_copy.py
│   │   │   ├── prepare_data.py
│   │   │   └── seq2seq_attn.py
│   │   ├── seq2seq_configs/
│   │   │   ├── README.md
│   │   │   ├── config_data_toy_copy.yml
│   │   │   ├── config_model_medium.yml
│   │   │   └── config_model_small.yml
│   │   ├── seq2seq_exposure_bias/
│   │   │   ├── README.md
│   │   │   ├── baseline_seq2seq_attn_main.py
│   │   │   ├── configs/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── config_giga.py
│   │   │   │   ├── config_iwslt14.py
│   │   │   │   └── config_model.py
│   │   │   ├── interpolation_decoder.py
│   │   │   ├── interpolation_helper.py
│   │   │   ├── interpolation_main.py
│   │   │   ├── raml_main.py
│   │   │   ├── requirements.txt
│   │   │   ├── scheduled_sampling_main.py
│   │   │   └── utils/
│   │   │       ├── prepare_data.py
│   │   │       └── raml_samples_generation/
│   │   │           ├── README.md
│   │   │           ├── gen_samples_giga.sh
│   │   │           ├── gen_samples_iwslt14.sh
│   │   │           ├── process_samples.py
│   │   │           ├── util.py
│   │   │           └── vocab.py
│   │   ├── seq2seq_rl/
│   │   │   ├── README.md
│   │   │   ├── config_iwslt14.py
│   │   │   ├── config_model.py
│   │   │   ├── config_toy_copy.py
│   │   │   ├── prepare_data.py
│   │   │   └── seq2seq_attn_pg.py
│   │   ├── seqgan/
│   │   │   ├── README.md
│   │   │   ├── config_coco.py
│   │   │   ├── config_ptb_large.py
│   │   │   ├── config_ptb_medium.py
│   │   │   ├── config_ptb_small.py
│   │   │   ├── data_utils.py
│   │   │   └── seqgan_train.py
│   │   ├── sequence_tagging/
│   │   │   ├── README.md
│   │   │   ├── config.py
│   │   │   ├── conll_reader.py
│   │   │   ├── conll_writer.py
│   │   │   ├── conlleval
│   │   │   ├── ner.py
│   │   │   └── scores.py
│   │   ├── text_style_transfer/
│   │   │   ├── README.md
│   │   │   ├── config.py
│   │   │   ├── ctrl_gen_model.py
│   │   │   ├── main.py
│   │   │   └── prepare_data.py
│   │   ├── torchtext/
│   │   │   ├── .gitignore
│   │   │   ├── README.md
│   │   │   ├── batchfirst_bptt.py
│   │   │   ├── config_small.py
│   │   │   ├── lm_torchtext.py
│   │   │   └── requirements.txt
│   │   ├── transformer/
│   │   │   ├── README.md
│   │   │   ├── bleu_tool.py
│   │   │   ├── config_iwslt15.py
│   │   │   ├── config_model.py
│   │   │   ├── config_wmt14.py
│   │   │   ├── preprocess_data.sh
│   │   │   ├── requirements.txt
│   │   │   ├── scripts/
│   │   │   │   ├── iwslt15_en_vi.sh
│   │   │   │   └── wmt14_en_de.sh
│   │   │   ├── transformer_main.py
│   │   │   └── utils/
│   │   │       ├── __init__.py
│   │   │       ├── data_utils.py
│   │   │       ├── preprocess.py
│   │   │       └── utils.py
│   │   └── vae_text/
│   │       ├── README.md
│   │       ├── config_lstm_ptb.py
│   │       ├── config_lstm_yahoo.py
│   │       ├── config_trans_ptb.py
│   │       ├── config_trans_yahoo.py
│   │       ├── prepare_data.py
│   │       └── vae_train.py
│   ├── requirements.txt
│   ├── setup.py
│   └── texar/
│       ├── __init__.py
│       ├── agents/
│       │   ├── __init__.py
│       │   ├── ac_agent.py
│       │   ├── agent_base.py
│       │   ├── agent_gym_utils.py
│       │   ├── agent_utils.py
│       │   ├── agent_utils_test.py
│       │   ├── dqn_agent.py
│       │   ├── episodic_agent_base.py
│       │   ├── pg_agent.py
│       │   ├── seq_agent_base.py
│       │   ├── seq_pg_agent.py
│       │   └── seq_pg_agent_test.py
│       ├── context.py
│       ├── context_test.py
│       ├── core/
│       │   ├── __init__.py
│       │   ├── explorations.py
│       │   ├── layers.py
│       │   ├── layers_test.py
│       │   ├── optimization.py
│       │   ├── optimization_test.py
│       │   └── replay_memories.py
│       ├── data/
│       │   ├── __init__.py
│       │   ├── data/
│       │   │   ├── __init__.py
│       │   │   ├── data_base.py
│       │   │   ├── data_iterators.py
│       │   │   ├── data_iterators_test.py
│       │   │   ├── dataset_utils.py
│       │   │   ├── dataset_utils_test.py
│       │   │   ├── mono_text_data.py
│       │   │   ├── mono_text_data_test.py
│       │   │   ├── multi_aligned_data.py
│       │   │   ├── multi_aligned_data_test.py
│       │   │   ├── paired_text_data.py
│       │   │   ├── paired_text_data_test.py
│       │   │   ├── scalar_data.py
│       │   │   ├── scalar_data_test.py
│       │   │   └── text_data_base.py
│       │   ├── data_decoders.py
│       │   ├── data_utils.py
│       │   ├── data_utils_test.py
│       │   ├── embedding.py
│       │   ├── embedding_test.py
│       │   ├── vocabulary.py
│       │   └── vocabulary_test.py
│       ├── evals/
│       │   ├── __init__.py
│       │   ├── bleu.py
│       │   ├── bleu_moses.py
│       │   ├── bleu_test.py
│       │   └── metrics.py
│       ├── hyperparams.py
│       ├── hyperparams_test.py
│       ├── losses/
│       │   ├── __init__.py
│       │   ├── adv_losses.py
│       │   ├── adv_losses_test.py
│       │   ├── entropy.py
│       │   ├── losses_utils.py
│       │   ├── mle_losses.py
│       │   ├── mle_losses_test.py
│       │   ├── pg_losses.py
│       │   ├── rewards.py
│       │   ├── rewards_test.py
│       │   └── rl_losses.py
│       ├── models/
│       │   ├── __init__.py
│       │   ├── model_base.py
│       │   └── seq2seq/
│       │       ├── __init__.py
│       │       ├── basic_seq2seq.py
│       │       └── seq2seq_base.py
│       ├── module_base.py
│       ├── modules/
│       │   ├── __init__.py
│       │   ├── classifiers/
│       │   │   ├── __init__.py
│       │   │   ├── classifier_base.py
│       │   │   ├── conv_classifiers.py
│       │   │   ├── conv_classifiers_test.py
│       │   │   ├── rnn_classifiers.py
│       │   │   └── rnn_classifiers_test.py
│       │   ├── connectors/
│       │   │   ├── __init__.py
│       │   │   ├── connector_base.py
│       │   │   ├── connectors.py
│       │   │   └── connectors_test.py
│       │   ├── decoders/
│       │   │   ├── __init__.py
│       │   │   ├── beam_search_decode.py
│       │   │   ├── beam_search_decode_test.py
│       │   │   ├── rnn_decoder_base.py
│       │   │   ├── rnn_decoder_helpers.py
│       │   │   ├── rnn_decoders.py
│       │   │   ├── rnn_decoders_test.py
│       │   │   ├── transformer_decoders.py
│       │   │   └── transformer_decoders_test.py
│       │   ├── embedders/
│       │   │   ├── __init__.py
│       │   │   ├── embedder_base.py
│       │   │   ├── embedder_utils.py
│       │   │   ├── embedder_utils_test.py
│       │   │   ├── embedders.py
│       │   │   ├── embedders_test.py
│       │   │   └── position_embedders.py
│       │   ├── encoders/
│       │   │   ├── __init__.py
│       │   │   ├── conv_encoders.py
│       │   │   ├── conv_encoders_test.py
│       │   │   ├── encoder_base.py
│       │   │   ├── hierarchical_encoders.py
│       │   │   ├── hierarchical_encoders_test.py
│       │   │   ├── multihead_attention.py
│       │   │   ├── rnn_encoders.py
│       │   │   ├── rnn_encoders_test.py
│       │   │   └── transformer_encoders.py
│       │   ├── memory/
│       │   │   ├── __init__.py
│       │   │   ├── embed_fns.py
│       │   │   ├── memory_network.py
│       │   │   └── memory_network_test.py
│       │   ├── networks/
│       │   │   ├── __init__.py
│       │   │   ├── conv_networks.py
│       │   │   ├── conv_networks_test.py
│       │   │   ├── network_base.py
│       │   │   ├── networks.py
│       │   │   └── networks_test.py
│       │   ├── policies/
│       │   │   ├── __init__.py
│       │   │   ├── policy_nets.py
│       │   │   └── policy_nets_test.py
│       │   └── qnets/
│       │       ├── __init__.py
│       │       └── qnets.py
│       ├── run/
│       │   ├── __init__.py
│       │   ├── executor.py
│       │   └── executor_test.py
│       └── utils/
│           ├── __init__.py
│           ├── average_recorder.py
│           ├── average_recorder_test.py
│           ├── beam_search.py
│           ├── dtypes.py
│           ├── exceptions.py
│           ├── mode.py
│           ├── mode_test.py
│           ├── shapes.py
│           ├── shapes_test.py
│           ├── transformer_attentions.py
│           ├── transformer_utils.py
│           ├── utils.py
│           ├── utils_io.py
│           ├── utils_test.py
│           └── variables.py
└── uncased_L-12_H-768_A-12/
    ├── bert_config.json
    └── vocab.txt
Download .txt
SYMBOL INDEX (1333 symbols across 172 files)

FILE: Inference.py
  function infer_single_example (line 49) | def infer_single_example(story,actual_summary,tokenizer):
  function results (line 81) | def results():

FILE: main.py
  function _train_epoch (line 14) | def _train_epoch(sess, epoch, step, smry_writer):
  function _eval_epoch (line 61) | def _eval_epoch(sess, epoch, mode):

FILE: preprocess.py
  class InputExample (line 13) | class InputExample():
    method __init__ (line 16) | def __init__(self, guid, text_a, text_b=None):
  class InputFeatures (line 31) | class InputFeatures():
    method __init__ (line 34) | def __init__(self, src_input_ids,src_input_mask,src_segment_ids,tgt_in...
  class DataProcessor (line 43) | class DataProcessor(object):
    method get_train_examples (line 46) | def get_train_examples(self, data_dir):
    method get_dev_examples (line 50) | def get_dev_examples(self, data_dir):
    method get_test_examples (line 54) | def get_test_examples(self, data_dir):
    method get_labels (line 58) | def get_labels(self):
    method _read_tsv (line 63) | def _read_tsv(cls, input_file, quotechar=None):
    method _read_file (line 75) | def _read_file(cls, input_file, quotechar=None):
  class CNNDailymail (line 86) | class CNNDailymail(DataProcessor):
    method get_train_examples (line 89) | def get_train_examples(self, data_dir):
    method get_dev_examples (line 95) | def get_dev_examples(self, data_dir):
    method get_test_examples (line 101) | def get_test_examples(self, data_dir):
    method _create_examples (line 107) | def _create_examples(self, src_lines,tgt_lines,set_type):
  function file_based_convert_examples_to_features (line 124) | def file_based_convert_examples_to_features(
  function convert_single_example (line 162) | def convert_single_example(ex_index, example, max_seq_length_src,max_seq...
  function file_based_input_fn_builder (line 246) | def file_based_input_fn_builder(input_file, max_seq_length_src,max_seq_l...
  function get_dataset (line 321) | def get_dataset(processor,

FILE: texar_repo/bin/average_checkpoints.py
  function main (line 29) | def main():

FILE: texar_repo/bin/train.py
  function _process_config (line 107) | def _process_config():
  function _get_run_config (line 146) | def _get_run_config(config):
  function main (line 168) | def main(_):

FILE: texar_repo/bin/utils/make_vocab.py
  function main (line 64) | def main(_):

FILE: texar_repo/examples/bert/bert_classifier_main.py
  function main (line 70) | def main(_):

FILE: texar_repo/examples/bert/utils/data_utils.py
  class InputExample (line 28) | class InputExample():
    method __init__ (line 31) | def __init__(self, guid, text_a, text_b=None, label=None):
  class InputFeatures (line 48) | class InputFeatures():
    method __init__ (line 51) | def __init__(self, input_ids, input_mask, segment_ids, label_id):
  class DataProcessor (line 58) | class DataProcessor(object):
    method get_train_examples (line 61) | def get_train_examples(self, data_dir):
    method get_dev_examples (line 65) | def get_dev_examples(self, data_dir):
    method get_test_examples (line 69) | def get_test_examples(self, data_dir):
    method get_labels (line 73) | def get_labels(self):
    method _read_tsv (line 78) | def _read_tsv(cls, input_file, quotechar=None):
  class SSTProcessor (line 88) | class SSTProcessor(DataProcessor):
    method get_train_examples (line 91) | def get_train_examples(self, data_dir):
    method get_dev_examples (line 96) | def get_dev_examples(self, data_dir):
    method get_test_examples (line 101) | def get_test_examples(self, data_dir):
    method get_labels (line 106) | def get_labels(self):
    method _create_examples (line 110) | def _create_examples(self, lines, set_type):
  class XnliProcessor (line 137) | class XnliProcessor(DataProcessor):
    method __init__ (line 140) | def __init__(self):
    method get_train_examples (line 143) | def get_train_examples(self, data_dir):
    method get_dev_examples (line 162) | def get_dev_examples(self, data_dir):
    method get_labels (line 180) | def get_labels(self):
  class MnliProcessor (line 184) | class MnliProcessor(DataProcessor):
    method get_train_examples (line 187) | def get_train_examples(self, data_dir):
    method get_dev_examples (line 192) | def get_dev_examples(self, data_dir):
    method get_test_examples (line 198) | def get_test_examples(self, data_dir):
    method get_labels (line 204) | def get_labels(self):
    method _create_examples (line 208) | def _create_examples(self, lines, set_type):
  class MrpcProcessor (line 226) | class MrpcProcessor(DataProcessor):
    method get_train_examples (line 229) | def get_train_examples(self, data_dir):
    method get_dev_examples (line 235) | def get_dev_examples(self, data_dir):
    method get_test_examples (line 241) | def get_test_examples(self, data_dir):
    method get_labels (line 247) | def get_labels(self):
    method _create_examples (line 251) | def _create_examples(self, lines, set_type):
  class ColaProcessor (line 268) | class ColaProcessor(DataProcessor):
    method get_train_examples (line 271) | def get_train_examples(self, data_dir):
    method get_dev_examples (line 277) | def get_dev_examples(self, data_dir):
    method get_test_examples (line 283) | def get_test_examples(self, data_dir):
    method get_labels (line 289) | def get_labels(self):
    method _create_examples (line 293) | def _create_examples(self, lines, set_type):
  function convert_single_example (line 312) | def convert_single_example(ex_index, example, label_list, max_seq_length,
  function file_based_convert_examples_to_features (line 408) | def file_based_convert_examples_to_features(
  function file_based_input_fn_builder (line 433) | def file_based_input_fn_builder(input_file, seq_length, is_training,
  function _truncate_seq_pair (line 501) | def _truncate_seq_pair(tokens_a, tokens_b, max_length):
  function get_dataset (line 518) | def get_dataset(processor,

FILE: texar_repo/examples/bert/utils/model_utils.py
  function transform_bert_to_texar_config (line 15) | def transform_bert_to_texar_config(input_json):
  function get_lr (line 76) | def get_lr(global_step, num_train_steps, num_warmup_steps, static_lr):
  function _get_assignment_map_from_checkpoint (line 108) | def _get_assignment_map_from_checkpoint(tvars, init_checkpoint):
  function init_bert_checkpoint (line 173) | def init_bert_checkpoint(init_checkpoint):
  function set_random_seed (line 181) | def set_random_seed(myseed):

FILE: texar_repo/examples/bert/utils/tokenization.py
  function convert_to_unicode (line 29) | def convert_to_unicode(text):
  function printable_text (line 33) | def printable_text(text):
  function load_vocab (line 37) | def load_vocab(vocab_file):
  function convert_by_vocab (line 52) | def convert_by_vocab(vocab, items):
  function convert_tokens_to_ids (line 60) | def convert_tokens_to_ids(vocab, tokens):
  function convert_ids_to_tokens (line 64) | def convert_ids_to_tokens(inv_vocab, ids):
  function whitespace_tokenize (line 68) | def whitespace_tokenize(text):
  class FullTokenizer (line 77) | class FullTokenizer(object):
    method __init__ (line 80) | def __init__(self, vocab_file, do_lower_case=True):
    method tokenize (line 86) | def tokenize(self, text):
    method convert_tokens_to_ids (line 94) | def convert_tokens_to_ids(self, tokens):
    method convert_ids_to_tokens (line 97) | def convert_ids_to_tokens(self, ids):
  class BasicTokenizer (line 101) | class BasicTokenizer(object):
    method __init__ (line 104) | def __init__(self, do_lower_case=True):
    method tokenize (line 112) | def tokenize(self, text):
    method _run_strip_accents (line 136) | def _run_strip_accents(self, text):
    method _run_split_on_punc (line 147) | def _run_split_on_punc(self, text):
    method _tokenize_chinese_chars (line 167) | def _tokenize_chinese_chars(self, text):
    method _is_chinese_char (line 180) | def _is_chinese_char(self, cp):
    method _clean_text (line 204) | def _clean_text(self, text):
  class WordpieceTokenizer (line 218) | class WordpieceTokenizer(object):
    method __init__ (line 221) | def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=...
    method tokenize (line 226) | def tokenize(self, text):
  function _is_whitespace (line 279) | def _is_whitespace(char):
  function _is_control (line 291) | def _is_control(char):
  function _is_punctuation (line 303) | def _is_punctuation(char):

FILE: texar_repo/examples/distributed_gpu/lm_ptb_distributed.py
  function _main (line 72) | def _main(_):

FILE: texar_repo/examples/distributed_gpu/ptb_reader.py
  function ptb_iterator (line 29) | def ptb_iterator(data, batch_size, num_steps, is_train=False):
  function prepare_data (line 61) | def prepare_data(data_path):

FILE: texar_repo/examples/hierarchical_dialog/hred.py
  function main (line 45) | def main():

FILE: texar_repo/examples/hierarchical_dialog/sw_loader.py
  class Dataset (line 31) | class Dataset(object):
    method __init__ (line 35) | def __init__(self, jsonl_path, mode=None):
    method __len__ (line 94) | def __len__(self):
    method __getitem__ (line 97) | def __getitem__(self, idx):
    method get (line 114) | def get(self, idx):
  function sw1c2r (line 122) | def sw1c2r(data_root):
  function generate_reference_for_test_dialog (line 133) | def generate_reference_for_test_dialog(dataset, data_root):
  function download_and_process (line 193) | def download_and_process(data_root):

FILE: texar_repo/examples/language_model_ptb/lm_ptb.py
  function _main (line 71) | def _main(_):

FILE: texar_repo/examples/language_model_ptb/ptb_reader.py
  function ptb_iterator (line 29) | def ptb_iterator(data, batch_size, num_steps):
  function prepare_data (line 47) | def prepare_data(data_path):

FILE: texar_repo/examples/memory_network_lm/lm_ptb_memnet.py
  function _main (line 62) | def _main(_):

FILE: texar_repo/examples/memory_network_lm/ptb_reader.py
  function ptb_iterator (line 29) | def ptb_iterator(data, batch_size, num_steps):
  function ptb_iterator_memnet (line 47) | def ptb_iterator_memnet(data, batch_size, memory_size):
  function prepare_data (line 66) | def prepare_data(data_path):

FILE: texar_repo/examples/rl_gym/pg_cartpole.py
  function _main (line 38) | def _main(_):

FILE: texar_repo/examples/sentence_classifier/clas_main.py
  function _main (line 40) | def _main(_):

FILE: texar_repo/examples/sentence_classifier/sst_data_preprocessor.py
  function clean_sst_text (line 39) | def clean_sst_text(text):
  function transform_raw_sst (line 46) | def transform_raw_sst(data_path, raw_fn, new_fn):
  function prepare_data (line 66) | def prepare_data(data_path):
  function _main (line 91) | def _main(_):

FILE: texar_repo/examples/seq2seq_attn/prepare_data.py
  function prepare_data (line 27) | def prepare_data():
  function main (line 47) | def main():

FILE: texar_repo/examples/seq2seq_attn/seq2seq_attn.py
  function build_model (line 37) | def build_model(batch, train_data):
  function main (line 83) | def main():

FILE: texar_repo/examples/seq2seq_exposure_bias/baseline_seq2seq_attn_main.py
  function build_model (line 50) | def build_model(batch, train_data):
  function print_stdout_and_file (line 96) | def print_stdout_and_file(content, file):
  function main (line 101) | def main():

FILE: texar_repo/examples/seq2seq_exposure_bias/interpolation_decoder.py
  class InterpolationDecoder (line 32) | class InterpolationDecoder(AttentionRNNDecoder):
    method __init__ (line 70) | def __init__(self,
    method initialize (line 83) | def initialize(self, name=None):
    method step (line 98) | def step(self, time, inputs, state, name=None):

FILE: texar_repo/examples/seq2seq_exposure_bias/interpolation_helper.py
  function calc_reward (line 32) | def calc_reward(refs, hypo, unk_id, metric):
  class InterpolationHelper (line 57) | class InterpolationHelper(SampleEmbeddingHelper):
    method __init__ (line 75) | def __init__(self,
    method sample (line 92) | def sample(self, time, outputs, state, name=None):
    method next_inputs (line 121) | def next_inputs(self, time, outputs, state, sample_ids, name=None):
    method _sample_by_reward (line 135) | def _sample_by_reward(self, time, state):

FILE: texar_repo/examples/seq2seq_exposure_bias/interpolation_main.py
  function build_model (line 71) | def build_model(batch, train_data, lambdas):
  function print_stdout_and_file (line 133) | def print_stdout_and_file(content, file):
  function main (line 138) | def main():

FILE: texar_repo/examples/seq2seq_exposure_bias/raml_main.py
  function read_raml_sample_file (line 64) | def read_raml_sample_file():
  function raml_loss (line 86) | def raml_loss(batch, output, training_rewards):
  function build_model (line 96) | def build_model(batch, train_data, rewards):
  function print_stdout_and_file (line 142) | def print_stdout_and_file(content, file):
  function main (line 147) | def main():

FILE: texar_repo/examples/seq2seq_exposure_bias/scheduled_sampling_main.py
  function inverse_sigmoid (line 60) | def inverse_sigmoid(i):
  function build_model (line 65) | def build_model(batch, train_data, self_sampling_proba):
  function print_stdout_and_file (line 120) | def print_stdout_and_file(content, file):
  function main (line 125) | def main():

FILE: texar_repo/examples/seq2seq_exposure_bias/utils/prepare_data.py
  function prepare_data (line 28) | def prepare_data():
  function main (line 49) | def main():

FILE: texar_repo/examples/seq2seq_exposure_bias/utils/raml_samples_generation/process_samples.py
  function is_valid_sample (line 16) | def is_valid_sample(sent):
  function sample_from_model (line 21) | def sample_from_model(args):
  function get_new_ngram (line 86) | def get_new_ngram(ngram, n, vocab):
  function sample_ngram (line 97) | def sample_ngram(args):
  function sample_ngram_adapt (line 167) | def sample_ngram_adapt(args):
  function sample_from_hamming_distance_payoff_distribution (line 229) | def sample_from_hamming_distance_payoff_distribution(args):
  function generate_hamming_distance_payoff_distribution (line 270) | def generate_hamming_distance_payoff_distribution(max_sent_len, vocab_si...

FILE: texar_repo/examples/seq2seq_exposure_bias/utils/raml_samples_generation/util.py
  function read_corpus (line 4) | def read_corpus(file_path, source):
  function batch_slice (line 16) | def batch_slice(data, batch_size, sort=True):
  function data_iter (line 31) | def data_iter(data, batch_size, shuffle=True):

FILE: texar_repo/examples/seq2seq_exposure_bias/utils/raml_samples_generation/vocab.py
  class VocabEntry (line 11) | class VocabEntry(object):
    method __init__ (line 12) | def __init__(self):
    method __getitem__ (line 22) | def __getitem__(self, word):
    method __contains__ (line 25) | def __contains__(self, word):
    method __setitem__ (line 28) | def __setitem__(self, key, value):
    method __len__ (line 31) | def __len__(self):
    method __repr__ (line 34) | def __repr__(self):
    method id2word (line 37) | def id2word(self, wid):
    method add (line 40) | def add(self, word):
    method from_corpus (line 49) | def from_corpus(corpus, size, remove_singleton=True):
  class Vocab (line 67) | class Vocab(object):
    method __init__ (line 68) | def __init__(self, src_sents, tgt_sents, src_vocab_size, tgt_vocab_siz...
    method __repr__ (line 77) | def __repr__(self):

FILE: texar_repo/examples/seq2seq_rl/prepare_data.py
  function prepare_data (line 27) | def prepare_data():
  function main (line 47) | def main():

FILE: texar_repo/examples/seq2seq_rl/seq2seq_attn_pg.py
  function build_model (line 49) | def build_model(batch, train_data):
  function main (line 91) | def main():

FILE: texar_repo/examples/seqgan/data_utils.py
  function prepare_data (line 36) | def prepare_data(args, config, train_path):

FILE: texar_repo/examples/seqgan/seqgan_train.py
  function _main (line 40) | def _main(_):

FILE: texar_repo/examples/sequence_tagging/conll_reader.py
  function create_vocabs (line 38) | def create_vocabs(train_path, dev_path, test_path, normalize_digits=True...
  function read_data (line 92) | def read_data(source_path, word_vocab, char_vocab, ner_vocab, normalize_...
  function iterate_batch (line 109) | def iterate_batch(data, batch_size, shuffle=False):
  function load_glove (line 144) | def load_glove(filename, emb_dim, normalize_digits=True):
  function construct_init_word_vecs (line 175) | def construct_init_word_vecs(vocab, word_vecs, glove_dict):
  class CoNLLReader (line 188) | class CoNLLReader(object):
    method __init__ (line 189) | def __init__(self, file_path, word_vocab, char_vocab, ner_vocab):
    method close (line 195) | def close(self):
    method getNext (line 198) | def getNext(self, normalize_digits=True):
  class NERInstance (line 247) | class NERInstance(object):
    method __init__ (line 248) | def __init__(self, sentence, ner_tags, ner_ids):
    method length (line 253) | def length(self):
  class Sentence (line 257) | class Sentence(object):
    method __init__ (line 258) | def __init__(self, words, word_ids, char_seqs, char_id_seqs):
    method length (line 264) | def length(self):

FILE: texar_repo/examples/sequence_tagging/conll_writer.py
  class CoNLLWriter (line 4) | class CoNLLWriter(object):
    method __init__ (line 5) | def __init__(self, i2w, i2n):
    method start (line 10) | def start(self, file_path):
    method close (line 13) | def close(self):
    method write (line 16) | def write(self, word, predictions, targets, lengths):

FILE: texar_repo/examples/sequence_tagging/ner.py
  function _train_epoch (line 132) | def _train_epoch(sess, epoch):
  function _eval (line 164) | def _eval(sess, epoch, data_tag):

FILE: texar_repo/examples/sequence_tagging/scores.py
  function scores (line 4) | def scores(path):

FILE: texar_repo/examples/text_style_transfer/ctrl_gen_model.py
  class CtrlGenModel (line 32) | class CtrlGenModel(object):
    method __init__ (line 36) | def __init__(self, inputs, vocab, gamma, lambda_g, hparams=None):
    method _build_model (line 40) | def _build_model(self, inputs, vocab, gamma, lambda_g):

FILE: texar_repo/examples/text_style_transfer/main.py
  function _main (line 51) | def _main(_):

FILE: texar_repo/examples/text_style_transfer/prepare_data.py
  function prepare_data (line 20) | def prepare_data():
  function main (line 30) | def main():

FILE: texar_repo/examples/torchtext/batchfirst_bptt.py
  class BatchFirstBPTTIterator (line 18) | class BatchFirstBPTTIterator(BPTTIterator):
    method __len__ (line 47) | def __len__(self):
    method __iter__ (line 51) | def __iter__(self):

FILE: texar_repo/examples/torchtext/lm_torchtext.py
  function _main (line 48) | def _main(_):

FILE: texar_repo/examples/transformer/bleu_tool.py
  function _get_ngrams (line 54) | def _get_ngrams(segment, max_order):
  function compute_bleu (line 74) | def compute_bleu(reference_corpus,
  class UnicodeRegex (line 142) | class UnicodeRegex(object):
    method __init__ (line 145) | def __init__(self):
    method property_chars (line 151) | def property_chars(self, prefix):
  function bleu_tokenize (line 160) | def bleu_tokenize(string):
  function bleu_wrapper (line 190) | def bleu_wrapper(ref_filename, hyp_filename, case_sensitive=False):

FILE: texar_repo/examples/transformer/transformer_main.py
  function main (line 52) | def main():

FILE: texar_repo/examples/transformer/utils/data_utils.py
  function load_data_numpy (line 23) | def load_data_numpy(input_dir, prefix):
  function seq2seq_pad_concat_convert (line 33) | def seq2seq_pad_concat_convert(xy_batch, eos_id=2, bos_id=1):
  function source_pad_concat_convert (line 73) | def source_pad_concat_convert(x_seqs, eos_id=2, bos_id=1):
  function _concat_examples (line 85) | def _concat_examples(arrays, padding=0):
  function write_words (line 105) | def write_words(words_list, filename):

FILE: texar_repo/examples/transformer/utils/preprocess.py
  function split_sentence (line 36) | def split_sentence(s, tok=False):
  function open_file (line 52) | def open_file(path):
  function read_file (line 56) | def read_file(path, tok=False):
  function count_words (line 64) | def count_words(path, max_vocab_size=40000, tok=False):
  function make_array (line 74) | def make_array(word_id, words):
  function make_dataset (line 79) | def make_dataset(path, w2id, tok=False):
  function get_preprocess_args (line 94) | def get_preprocess_args():

FILE: texar_repo/examples/transformer/utils/utils.py
  function set_random_seed (line 28) | def set_random_seed(myseed):
  function batch_size_fn (line 33) | def batch_size_fn(new, count, size_so_far):
  function get_lr (line 41) | def get_lr(fstep, opt_config):
  function get_logger (line 50) | def get_logger(log_path):
  function list_strip_eos (line 65) | def list_strip_eos(list_, eos_token):

FILE: texar_repo/examples/vae_text/prepare_data.py
  function prepare_data (line 22) | def prepare_data(data_name):

FILE: texar_repo/examples/vae_text/vae_train.py
  function kl_dvg (line 58) | def kl_dvg(means, logvars):
  function _main (line 68) | def _main(_):

FILE: texar_repo/texar/agents/ac_agent.py
  class ActorCriticAgent (line 34) | class ActorCriticAgent(EpisodicAgentBase):
    method __init__ (line 77) | def __init__(self,
    method default_hparams (line 122) | def default_hparams():
    method _reset (line 175) | def _reset(self):
    method _observe (line 179) | def _observe(self, reward, terminal, train_policy, feed_dict):
    method _train_actor (line 186) | def _train_actor(self, observ, action, feed_dict):
    method get_action (line 200) | def get_action(self, observ, feed_dict=None):
    method sess (line 209) | def sess(self):
    method sess (line 215) | def sess(self, session):

FILE: texar_repo/texar/agents/agent_base.py
  class AgentBase (line 30) | class AgentBase(object):
    method __init__ (line 37) | def __init__(self, hparams=None):
    method default_hparams (line 45) | def default_hparams():
    method variable_scope (line 55) | def variable_scope(self):
    method name (line 61) | def name(self):
    method hparams (line 67) | def hparams(self):

FILE: texar_repo/texar/agents/agent_gym_utils.py
  function convert_gym_space (line 28) | def convert_gym_space(spc):
  function get_gym_env_config (line 45) | def get_gym_env_config(env):

FILE: texar_repo/texar/agents/agent_utils.py
  class Space (line 37) | class Space(object):
    method __init__ (line 71) | def __init__(self, shape=None, low=None, high=None, dtype=None):
    method contains (line 104) | def contains(self, x):
    method shape (line 118) | def shape(self):
    method low (line 124) | def low(self):
    method high (line 130) | def high(self):
    method dtype (line 136) | def dtype(self):
  class EnvConfig (line 141) | class EnvConfig(object):
    method __init__ (line 153) | def __init__(self,

FILE: texar_repo/texar/agents/agent_utils_test.py
  class SpaceTest (line 19) | class SpaceTest(tf.test.TestCase):
    method _test_space (line 23) | def _test_space(self, s, shape, low, high, dtype):
    method test_space (line 29) | def test_space(self):

FILE: texar_repo/texar/agents/dqn_agent.py
  class DQNAgent (line 38) | class DQNAgent(EpisodicAgentBase):
    method __init__ (line 87) | def __init__(self,
    method default_hparams (line 151) | def default_hparams():
    method _build_graph (line 260) | def _build_graph(self):
    method _get_qnet_outputs (line 288) | def _get_qnet_outputs(self, state_inputs):
    method _get_target_outputs (line 291) | def _get_target_outputs(self, state_inputs):
    method _get_td_error (line 294) | def _get_td_error(self, qnet_qvalues, actions, y):
    method _get_train_op (line 297) | def _get_train_op(self):
    method _get_copy_update_op (line 304) | def _get_copy_update_op(self):
    method _get_tau_update_op (line 311) | def _get_tau_update_op(self):
    method _observe (line 321) | def _observe(self, reward, terminal, train_policy, feed_dict):
    method _train_qnet (line 336) | def _train_qnet(self, feed_dict):
    method _update_target (line 366) | def _update_target(self, feed_dict):
    method _qvalues_from_qnet (line 372) | def _qvalues_from_qnet(self, observ):
    method _qvalues_from_target (line 378) | def _qvalues_from_target(self, observ):
    method _update_observ_action (line 384) | def _update_observ_action(self, observ, action):
    method _get_action (line 390) | def _get_action(self, observ, feed_dict=None):
    method _reset (line 402) | def _reset(self):
    method sess (line 407) | def sess(self):
    method sess (line 413) | def sess(self, session):

FILE: texar_repo/texar/agents/episodic_agent_base.py
  class EpisodicAgentBase (line 27) | class EpisodicAgentBase(AgentBase):
    method __init__ (line 66) | def __init__(self, env_config, hparams=None):
    method default_hparams (line 79) | def default_hparams():
    method reset (line 92) | def reset(self):
    method _reset (line 97) | def _reset(self):
    method observe (line 100) | def observe(self, reward, terminal, train_policy=True, feed_dict=None):
    method _observe (line 113) | def _observe(self, reward, terminal, train_policy, feed_dict):
    method get_action (line 116) | def get_action(self, observ, feed_dict=None):
    method _get_action (line 127) | def _get_action(self, observ, feed_dict):
    method env_config (line 131) | def env_config(self):

FILE: texar_repo/texar/agents/pg_agent.py
  class PGAgent (line 31) | class PGAgent(EpisodicAgentBase):
    method __init__ (line 80) | def __init__(self,
    method _build_graph (line 113) | def _build_graph(self):
    method _get_policy_outputs (line 134) | def _get_policy_outputs(self):
    method _get_pg_loss (line 139) | def _get_pg_loss(self):
    method _get_train_op (line 154) | def _get_train_op(self):
    method default_hparams (line 163) | def default_hparams():
    method _reset (line 216) | def _reset(self):
    method _get_action (line 221) | def _get_action(self, observ, feed_dict):
    method _observe (line 238) | def _observe(self, reward, terminal, train_policy, feed_dict):
    method _train_policy (line 244) | def _train_policy(self, feed_dict=None):
    method sess (line 265) | def sess(self):
    method sess (line 271) | def sess(self, session):
    method policy (line 275) | def policy(self):

FILE: texar_repo/texar/agents/seq_agent_base.py
  class SeqAgentBase (line 25) | class SeqAgentBase(AgentBase):
    method __init__ (line 32) | def __init__(self, hparams=None):
    method default_hparams (line 37) | def default_hparams():

FILE: texar_repo/texar/agents/seq_pg_agent.py
  class SeqPGAgent (line 34) | class SeqPGAgent(SeqAgentBase):
    method __init__ (line 86) | def __init__(self,
    method _build_graph (line 117) | def _build_graph(self):
    method _get_pg_loss (line 126) | def _get_pg_loss(self):
    method _get_entropy (line 146) | def _get_entropy(self):
    method _get_train_op (line 157) | def _get_train_op(self):
    method default_hparams (line 166) | def default_hparams():
    method _get_partial_run_feeds (line 230) | def _get_partial_run_feeds(self, feeds=None):
    method _setup_partial_run (line 236) | def _setup_partial_run(self, fetches=None, feeds=None):
    method _check_extra_fetches (line 251) | def _check_extra_fetches(self, extra_fetches):
    method get_samples (line 273) | def get_samples(self, extra_fetches=None, feed_dict=None):
    method observe (line 335) | def observe(self, reward, train_policy=True, compute_loss=True):
    method _get_qvalues (line 362) | def _get_qvalues(self):
    method _evaluate_pg_loss (line 370) | def _evaluate_pg_loss(self):
    method _train_policy (line 387) | def _train_policy(self):
    method sess (line 407) | def sess(self):
    method sess (line 413) | def sess(self, sess):
    method pg_loss (line 417) | def pg_loss(self):
    method sequence_length (line 423) | def sequence_length(self):
    method samples (line 429) | def samples(self):
    method logits (line 435) | def logits(self):

FILE: texar_repo/texar/agents/seq_pg_agent_test.py
  class SeqPGAgentTest (line 29) | class SeqPGAgentTest(tf.test.TestCase):
    method setUp (line 33) | def setUp(self):
    method test_seq_pg_agent (line 45) | def test_seq_pg_agent(self):

FILE: texar_repo/texar/context.py
  function global_mode (line 34) | def global_mode():
  function global_mode_train (line 66) | def global_mode_train():
  function global_mode_eval (line 84) | def global_mode_eval():
  function global_mode_predict (line 90) | def global_mode_predict():
  function valid_modes (line 96) | def valid_modes():

FILE: texar_repo/texar/context_test.py
  class ContextTest (line 18) | class ContextTest(tf.test.TestCase):
    method test_global_mode (line 22) | def test_global_mode(self):

FILE: texar_repo/texar/core/explorations.py
  class ExplorationBase (line 31) | class ExplorationBase(object):
    method __init__ (line 39) | def __init__(self, hparams=None):
    method default_hparams (line 43) | def default_hparams():
    method get_epsilon (line 56) | def get_epsilon(self, timestep):
    method hparams (line 68) | def hparams(self):
  class EpsilonLinearDecayExploration (line 74) | class EpsilonLinearDecayExploration(ExplorationBase):
    method __init__ (line 82) | def __init__(self, hparams=None):
    method default_hparams (line 86) | def default_hparams():
    method get_epsilon (line 112) | def get_epsilon(self, timestep):

FILE: texar_repo/texar/core/layers.py
  function default_rnn_cell_hparams (line 72) | def default_rnn_cell_hparams():
  function get_rnn_cell (line 189) | def get_rnn_cell(hparams=None, mode=None):
  function get_rnn_cell_trainable_variables (line 279) | def get_rnn_cell_trainable_variables(cell):
  function default_regularizer_hparams (line 298) | def default_regularizer_hparams():
  function get_regularizer (line 323) | def get_regularizer(hparams=None):
  function get_initializer (line 366) | def get_initializer(hparams=None):
  function get_activation_fn (line 421) | def get_activation_fn(fn_name="identity", kwargs=None):
  function get_constraint_fn (line 466) | def get_constraint_fn(fn_name="NonNeg"):
  function get_layer (line 503) | def get_layer(hparams):
  function _compute_concat_output_shape (line 603) | def _compute_concat_output_shape(input_shape, axis):
  class _ReducePooling1D (line 628) | class _ReducePooling1D(tf.layers.Layer):
    method __init__ (line 636) | def __init__(self, reduce_function, data_format='channels_last',
    method compute_output_shape (line 645) | def compute_output_shape(self, input_shape):
    method call (line 652) | def call(self, inputs):
  class MaxReducePooling1D (line 658) | class MaxReducePooling1D(_ReducePooling1D):
    method __init__ (line 664) | def __init__(self, data_format='channels_last', name=None, **kwargs):
  class AverageReducePooling1D (line 668) | class AverageReducePooling1D(_ReducePooling1D):
    method __init__ (line 674) | def __init__(self, data_format='channels_last', name=None, **kwargs):
  function get_pooling_layer_hparams (line 685) | def get_pooling_layer_hparams(hparams):
  class MergeLayer (line 708) | class MergeLayer(tf.layers.Layer):
    method __init__ (line 753) | def __init__(self,
    method compute_output_shape (line 780) | def compute_output_shape(self, input_shape):
    method _collect_weights (line 814) | def _collect_weights(self):
    method call (line 829) | def call(self, inputs):
    method layers (line 884) | def layers(self):
  class SequentialLayer (line 890) | class SequentialLayer(tf.layers.Layer):
    method __init__ (line 901) | def __init__(self,
    method compute_output_shape (line 921) | def compute_output_shape(self, input_shape):
    method _collect_weights (line 928) | def _collect_weights(self):
    method call (line 941) | def call(self, inputs, mode=None): # pylint: disable=arguments-differ
    method layers (line 960) | def layers(self):
  function _common_default_conv_dense_kwargs (line 966) | def _common_default_conv_dense_kwargs():
  function default_conv1d_kwargs (line 990) | def default_conv1d_kwargs():
  function default_conv2d_kwargs (line 1045) | def default_conv2d_kwargs():
  function default_conv3d_kwargs (line 1049) | def default_conv3d_kwargs():
  function default_conv2d_transpose_kwargs (line 1053) | def default_conv2d_transpose_kwargs():
  function default_conv3d_transpose_kwargs (line 1057) | def default_conv3d_transpose_kwargs():
  function default_dense_kwargs (line 1062) | def default_dense_kwargs():
  function default_dropout_kwargs (line 1107) | def default_dropout_kwargs():
  function default_flatten_kwargs (line 1112) | def default_flatten_kwargs():
  function default_max_pooling1d_kwargs (line 1116) | def default_max_pooling1d_kwargs():
  function default_max_pooling2d_kwargs (line 1121) | def default_max_pooling2d_kwargs():
  function default_max_pooling3d_kwargs (line 1126) | def default_max_pooling3d_kwargs():
  function default_separable_conv2d_kwargs (line 1131) | def default_separable_conv2d_kwargs():
  function default_batch_normalization_kwargs (line 1136) | def default_batch_normalization_kwargs():
  function default_average_pooling1d_kwargs (line 1141) | def default_average_pooling1d_kwargs():
  function default_average_pooling2d_kwargs (line 1146) | def default_average_pooling2d_kwargs():
  function default_average_pooling3d_kwargs (line 1151) | def default_average_pooling3d_kwargs():
  function layer_normalize (line 1175) | def layer_normalize(inputs,
  function gelu (line 1192) | def gelu(input_tensor):

FILE: texar_repo/texar/core/layers_test.py
  class GetRNNCellTest (line 24) | class GetRNNCellTest(tf.test.TestCase):
    method test_get_rnn_cell (line 28) | def test_get_rnn_cell(self):
    method test_switch_dropout (line 95) | def test_switch_dropout(self):
  class GetActivationFnTest (line 129) | class GetActivationFnTest(tf.test.TestCase):
    method test_get_activation_fn (line 132) | def test_get_activation_fn(self):
  class GetLayerTest (line 160) | class GetLayerTest(tf.test.TestCase):
    method test_get_layer (line 163) | def test_get_layer(self):
  class ReducePoolingLayerTest (line 197) | class ReducePoolingLayerTest(tf.test.TestCase):
    method setUp (line 200) | def setUp(self):
    method test_max_reduce_pooling_layer (line 207) | def test_max_reduce_pooling_layer(self):
    method test_average_reduce_pooling_layer (line 225) | def test_average_reduce_pooling_layer(self):
  class MergeLayerTest (line 243) | class MergeLayerTest(tf.test.TestCase):
    method test_output_shape (line 247) | def test_output_shape(self):
    method test_layer_logics (line 265) | def test_layer_logics(self):
    method test_trainable_variables (line 287) | def test_trainable_variables(self):
  class SequentialLayerTest (line 304) | class SequentialLayerTest(tf.test.TestCase):
    method test_seq_layer (line 308) | def test_seq_layer(self):

FILE: texar_repo/texar/core/optimization.py
  function default_optimization_hparams (line 40) | def default_optimization_hparams():
  function get_optimizer_fn (line 173) | def get_optimizer_fn(hparams=None):
  function get_learning_rate_decay_fn (line 231) | def get_learning_rate_decay_fn(hparams=None):
  function get_gradient_clip_fn (line 294) | def get_gradient_clip_fn(hparams=None):
  function _get_static_lr (line 353) | def _get_static_lr(learning_rate=None, optimizer_class=None, hparams=None):
  function get_optimizer (line 368) | def get_optimizer(learning_rate=None, global_step=None, hparams=None):
  function get_train_op (line 410) | def get_train_op(loss, variables=None,
  class AdamWeightDecayOptimizer (line 482) | class AdamWeightDecayOptimizer(tf.train.Optimizer):
    method __init__ (line 491) | def __init__(self,
    method apply_gradients (line 510) | def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    method _do_use_weight_decay (line 575) | def _do_use_weight_decay(self, param_name):
    method _get_variable_name (line 585) | def _get_variable_name(self, param_name):

FILE: texar_repo/texar/core/optimization_test.py
  class OptimizationTest (line 19) | class OptimizationTest(tf.test.TestCase):
    method test_get_optimizer (line 23) | def test_get_optimizer(self):
    method test_get_learning_rate_decay_fn (line 62) | def test_get_learning_rate_decay_fn(self): # pylint: disable=too-many-...
    method test_get_gradient_clip_fn (line 106) | def test_get_gradient_clip_fn(self):    # pylint: disable=too-many-locals
    method test_get_train_op (line 149) | def test_get_train_op(self):

FILE: texar_repo/texar/core/replay_memories.py
  class ReplayMemoryBase (line 32) | class ReplayMemoryBase(object):
    method __init__ (line 40) | def __init__(self, hparams=None):
    method default_hparams (line 44) | def default_hparams():
    method add (line 57) | def add(self, element):
    method get (line 62) | def get(self, size):
    method last (line 67) | def last(self):
    method size (line 72) | def size(self):
  class DequeReplayMemory (line 78) | class DequeReplayMemory(ReplayMemoryBase):
    method __init__ (line 88) | def __init__(self, hparams=None):
    method default_hparams (line 94) | def default_hparams():
    method add (line 115) | def add(self, element):
    method get (line 124) | def get(self, size):
    method last (line 130) | def last(self):
    method size (line 135) | def size(self):

FILE: texar_repo/texar/data/data/data_base.py
  class DataBase (line 35) | class DataBase(object):
    method __init__ (line 39) | def __init__(self, hparams):
    method default_hparams (line 43) | def default_hparams():
    method _make_batch (line 148) | def _make_batch(dataset, hparams, padded_batch=False, padding_values=N...
    method _shuffle_dataset (line 166) | def _shuffle_dataset(dataset, hparams, dataset_files):
    method num_epochs (line 194) | def num_epochs(self):
    method batch_size (line 200) | def batch_size(self):
    method hparams (line 206) | def hparams(self):
    method name (line 213) | def name(self):

FILE: texar_repo/texar/data/data/data_iterators.py
  class DataIteratorBase (line 36) | class DataIteratorBase(object):
    method __init__ (line 54) | def __init__(self, datasets):
    method num_datasets (line 76) | def num_datasets(self):
    method dataset_names (line 82) | def dataset_names(self):
  class DataIterator (line 88) | class DataIterator(DataIteratorBase):
    method __init__ (line 133) | def __init__(self, datasets):
    method switch_to_dataset (line 146) | def switch_to_dataset(self, sess, dataset_name=None):
    method get_next (line 164) | def get_next(self):
  class TrainTestDataIterator (line 169) | class TrainTestDataIterator(DataIterator):
    method __init__ (line 211) | def __init__(self, train=None, val=None, test=None):
    method switch_to_train_data (line 229) | def switch_to_train_data(self, sess):
    method switch_to_val_data (line 239) | def switch_to_val_data(self, sess):
    method switch_to_test_data (line 249) | def switch_to_test_data(self, sess):
  class FeedableDataIterator (line 259) | class FeedableDataIterator(DataIteratorBase):
    method __init__ (line 320) | def __init__(self, datasets):
    method get_handle (line 337) | def get_handle(self, sess, dataset_name=None):
    method restart_dataset (line 368) | def restart_dataset(self, sess, dataset_name=None):
    method initialize_dataset (line 380) | def initialize_dataset(self, sess, dataset_name=None):
    method get_next (line 398) | def get_next(self):
    method handle (line 404) | def handle(self):
  class TrainTestFeedableDataIterator (line 410) | class TrainTestFeedableDataIterator(FeedableDataIterator):
    method __init__ (line 468) | def __init__(self, train=None, val=None, test=None):
    method get_train_handle (line 485) | def get_train_handle(self, sess):
    method get_val_handle (line 509) | def get_val_handle(self, sess):
    method get_test_handle (line 523) | def get_test_handle(self, sess):
    method restart_train_dataset (line 537) | def restart_train_dataset(self, sess):
    method restart_val_dataset (line 548) | def restart_val_dataset(self, sess):
    method restart_test_dataset (line 559) | def restart_test_dataset(self, sess):

FILE: texar_repo/texar/data/data/data_iterators_test.py
  class DataIteratorTest (line 22) | class DataIteratorTest(tf.test.TestCase):
    method setUp (line 26) | def setUp(self):
    method test_iterator_single_dataset (line 77) | def test_iterator_single_dataset(self):
    method test_iterator_multi_datasets (line 106) | def test_iterator_multi_datasets(self):
    method test_train_test_data_iterator (line 151) | def test_train_test_data_iterator(self):
    method test_feedable_iterator_multi_datasets (line 195) | def test_feedable_iterator_multi_datasets(self):
    method test_train_test_feedable_data_iterator (line 247) | def test_train_test_feedable_data_iterator(self):

FILE: texar_repo/texar/data/data/dataset_utils.py
  class _DataSpec (line 43) | class _DataSpec(object):
    method __init__ (line 55) | def __init__(self, dataset=None, dataset_size=None, decoder=None,
    method add_spec (line 64) | def add_spec(self, **kwargs):
    method get_ith_data_spec (line 69) | def get_ith_data_spec(self, i):
    method set_ith_data_spec (line 78) | def set_ith_data_spec(self, i, data_spec, total_count):
  function _make_length_filter_fn (line 96) | def _make_length_filter_fn(length_name, max_length):
  function _make_smaller_batch_filter_fn (line 104) | def _make_smaller_batch_filter_fn(batch_size):
  function _make_combined_filter_fn (line 118) | def _make_combined_filter_fn(filter_fns, mode="and"):
  function _connect_name (line 145) | def _connect_name(lhs_name, rhs_name):
  function maybe_tuple (line 152) | def maybe_tuple(data):
  function make_partial (line 161) | def make_partial(fn, *args, **kwargs):
  function name_prefix_fn (line 169) | def name_prefix_fn(name_prefix):
  function make_chained_transformation (line 181) | def make_chained_transformation(tran_fns, *args, **kwargs):
  function make_combined_transformation (line 202) | def make_combined_transformation(tran_fns, name_prefix=None, *args, **kw...
  function random_shard_dataset (line 250) | def random_shard_dataset(dataset_size, shard_size, seed=None):

FILE: texar_repo/texar/data/data/dataset_utils_test.py
  class TransformationTest (line 21) | class TransformationTest(tf.test.TestCase):
    method test_make_chained_transformation (line 25) | def test_make_chained_transformation(self):

FILE: texar_repo/texar/data/data/mono_text_data.py
  class _LengthFilterMode (line 42) | class _LengthFilterMode(object): # pylint: disable=no-init, too-few-publ...
  function _default_mono_text_dataset_hparams (line 48) | def _default_mono_text_dataset_hparams():
  class MonoTextData (line 72) | class MonoTextData(TextDataBase):
    method __init__ (line 144) | def __init__(self, hparams):
    method default_hparams (line 150) | def default_hparams():
    method make_vocab (line 319) | def make_vocab(hparams):
    method make_embedding (line 332) | def make_embedding(emb_hparams, token_to_id_map):
    method _make_mono_text_dataset (line 342) | def _make_mono_text_dataset(dataset_hparams):
    method _make_other_transformations (line 349) | def _make_other_transformations(other_trans_hparams, data_spec):
    method _make_processor (line 370) | def _make_processor(dataset_hparams, data_spec, chained=True,
    method _make_length_filter (line 411) | def _make_length_filter(dataset_hparams, length_name, decoder):
    method _process_dataset (line 421) | def _process_dataset(self, dataset, hparams, data_spec):
    method _make_bucket_length_fn (line 444) | def _make_bucket_length_fn(self):
    method _make_padded_text_and_id_shapes (line 454) | def _make_padded_text_and_id_shapes(dataset, dataset_hparams, decoder,
    method _make_padded_shapes (line 487) | def _make_padded_shapes(self, dataset, decoder):
    method _make_data (line 500) | def _make_data(self):
    method list_items (line 536) | def list_items(self):
    method dataset (line 545) | def dataset(self):
    method dataset_size (line 551) | def dataset_size(self):
    method vocab (line 564) | def vocab(self):
    method embedding_init_value (line 570) | def embedding_init_value(self):
    method text_name (line 579) | def text_name(self):
    method length_name (line 588) | def length_name(self):
    method text_id_name (line 597) | def text_id_name(self):
    method utterance_cnt_name (line 606) | def utterance_cnt_name(self):

FILE: texar_repo/texar/data/data/mono_text_data_test.py
  class MonoTextDataTest (line 23) | class MonoTextDataTest(tf.test.TestCase):
    method setUp (line 27) | def setUp(self):
    method _run_and_test (line 53) | def _run_and_test(self,
    method test_default_setting (line 118) | def test_default_setting(self):
    method test_batching (line 123) | def test_batching(self):
    method test_bucketing (line 131) | def test_bucketing(self):
    method test_shuffle (line 183) | def test_shuffle(self):
    method test_prefetch (line 192) | def test_prefetch(self):
    method test_other_transformations (line 199) | def test_other_transformations(self):
    method test_list_items (line 211) | def test_list_items(self):
    method test_length_discard (line 224) | def test_length_discard(self):
    method test_length_truncate (line 232) | def test_length_truncate(self):
    method test_pad_to_max_length (line 242) | def test_pad_to_max_length(self):
  class VarUttMonoTextDataTest (line 252) | class VarUttMonoTextDataTest(tf.test.TestCase):
    method setUp (line 256) | def setUp(self):
    method _run_and_test (line 294) | def _run_and_test(self, hparams):
    method test_default_setting (line 338) | def test_default_setting(self):
    method test_pad_to_max_length (line 343) | def test_pad_to_max_length(self):

FILE: texar_repo/texar/data/data/multi_aligned_data.py
  class _DataTypes (line 48) | class _DataTypes(object): # pylint: disable=no-init, too-few-public-methods
  function _is_text_data (line 55) | def _is_text_data(data_type):
  function _is_scalar_data (line 57) | def _is_scalar_data(data_type):
  function _default_dataset_hparams (line 60) | def _default_dataset_hparams(data_type=None):
  class MultiAlignedData (line 77) | class MultiAlignedData(TextDataBase):
    method __init__ (line 120) | def __init__(self, hparams):
    method default_hparams (line 136) | def default_hparams():
    method _raise_sharing_error (line 224) | def _raise_sharing_error(err_data, shr_data, hparam_name):
    method make_vocab (line 230) | def make_vocab(hparams):
    method make_embedding (line 286) | def make_embedding(hparams, vocabs):
    method _make_dataset (line 323) | def _make_dataset(self):
    method _get_name_prefix (line 361) | def _get_name_prefix(dataset_hparams):
    method _make_processor (line 369) | def _make_processor(dataset_hparams, data_spec, name_prefix):
    method _make_length_filter (line 406) | def _make_length_filter(dataset_hparams, length_name, decoder):
    method _process_dataset (line 418) | def _process_dataset(self, dataset, hparams, data_spec):
    method _make_bucket_length_fn (line 451) | def _make_bucket_length_fn(self):
    method _make_padded_shapes (line 467) | def _make_padded_shapes(self, dataset, decoders):
    method _make_data (line 482) | def _make_data(self):
    method list_items (line 516) | def list_items(self):
    method dataset (line 525) | def dataset(self):
    method dataset_size (line 530) | def dataset_size(self):
    method _maybe_name_to_id (line 542) | def _maybe_name_to_id(self, name_or_id):
    method vocab (line 549) | def vocab(self, name_or_id):
    method embedding_init_value (line 559) | def embedding_init_value(self, name_or_id):
    method text_name (line 566) | def text_name(self, name_or_id):
    method length_name (line 578) | def length_name(self, name_or_id):
    method text_id_name (line 590) | def text_id_name(self, name_or_id):
    method utterance_cnt_name (line 602) | def utterance_cnt_name(self, name_or_id):
    method data_name (line 616) | def data_name(self, name_or_id):

FILE: texar_repo/texar/data/data/multi_aligned_data_test.py
  class MultiAlignedDataTest (line 22) | class MultiAlignedDataTest(tf.test.TestCase):
    method setUp (line 26) | def setUp(self):
    method _run_and_test (line 95) | def _run_and_test(self, hparams, discard_did=None):
    method test_default_setting (line 159) | def test_default_setting(self):
    method test_length_filter (line 164) | def test_length_filter(self):

FILE: texar_repo/texar/data/data/paired_text_data.py
  function _default_paired_text_dataset_hparams (line 45) | def _default_paired_text_dataset_hparams():
  class PairedTextData (line 68) | class PairedTextData(TextDataBase):
    method __init__ (line 138) | def __init__(self, hparams):
    method default_hparams (line 144) | def default_hparams():
    method make_vocab (line 238) | def make_vocab(src_hparams, tgt_hparams):
    method make_embedding (line 279) | def make_embedding(src_emb_hparams, src_token_to_id_map,
    method _make_dataset (line 299) | def _make_dataset(self):
    method _get_name_prefix (line 309) | def _get_name_prefix(src_hparams, tgt_hparams):
    method _make_processor (line 318) | def _make_processor(src_hparams, tgt_hparams, data_spec, name_prefix):
    method _make_length_filter (line 349) | def _make_length_filter(src_hparams, tgt_hparams,
    method _process_dataset (line 360) | def _process_dataset(self, dataset, hparams, data_spec):
    method _make_bucket_length_fn (line 391) | def _make_bucket_length_fn(self):
    method _make_padded_shapes (line 401) | def _make_padded_shapes(self, dataset, src_decoder, tgt_decoder):
    method _make_data (line 422) | def _make_data(self):
    method list_items (line 469) | def list_items(self):
    method dataset (line 478) | def dataset(self):
    method dataset_size (line 483) | def dataset_size(self):
    method vocab (line 496) | def vocab(self):
    method source_vocab (line 503) | def source_vocab(self):
    method target_vocab (line 509) | def target_vocab(self):
    method source_embedding_init_value (line 515) | def source_embedding_init_value(self):
    method target_embedding_init_value (line 524) | def target_embedding_init_value(self):
    method embedding_init_value (line 532) | def embedding_init_value(self):
    method source_text_name (line 541) | def source_text_name(self):
    method source_length_name (line 550) | def source_length_name(self):
    method source_text_id_name (line 559) | def source_text_id_name(self):
    method source_utterance_cnt_name (line 569) | def source_utterance_cnt_name(self):
    method target_text_name (line 582) | def target_text_name(self):
    method target_length_name (line 591) | def target_length_name(self):
    method target_text_id_name (line 600) | def target_text_id_name(self):
    method target_utterance_cnt_name (line 610) | def target_utterance_cnt_name(self):
    method text_name (line 623) | def text_name(self):
    method length_name (line 629) | def length_name(self):
    method text_id_name (line 635) | def text_id_name(self):
    method utterance_cnt_name (line 641) | def utterance_cnt_name(self):

FILE: texar_repo/texar/data/data/paired_text_data_test.py
  class PairedTextDataTest (line 24) | class PairedTextDataTest(tf.test.TestCase):
    method setUp (line 28) | def setUp(self):
    method _run_and_test (line 65) | def _run_and_test(self, hparams, proc_shr=False, length_inc=None,
    method test_default_setting (line 128) | def test_default_setting(self):
    method test_shuffle (line 133) | def test_shuffle(self):
    method test_processing_share (line 140) | def test_processing_share(self):
    method test_other_transformations (line 147) | def test_other_transformations(self):
    method test_length_filter (line 161) | def test_length_filter(self):

FILE: texar_repo/texar/data/data/scalar_data.py
  function _default_scalar_dataset_hparams (line 39) | def _default_scalar_dataset_hparams():
  class ScalarData (line 53) | class ScalarData(DataBase):
    method __init__ (line 89) | def __init__(self, hparams):
    method default_hparams (line 95) | def default_hparams():
    method _get_dtype (line 159) | def _get_dtype(dtype_hparam):
    method _make_processor (line 169) | def _make_processor(dataset_hparams, data_spec, chained=True,
    method _process_dataset (line 190) | def _process_dataset(self, dataset, hparams, data_spec):
    method _make_data (line 204) | def _make_data(self):
    method list_items (line 231) | def list_items(self):
    method dataset (line 240) | def dataset(self):
    method dataset_size (line 245) | def dataset_size(self):
    method data_name (line 258) | def data_name(self):

FILE: texar_repo/texar/data/data/scalar_data_test.py
  class ScalarDataTest (line 20) | class ScalarDataTest(tf.test.TestCase):
    method setUp (line 24) | def setUp(self):
    method _run_and_test (line 59) | def _run_and_test(self, hparams):
    method test_default_setting (line 94) | def test_default_setting(self):
    method test_shuffle (line 100) | def test_shuffle(self):

FILE: texar_repo/texar/data/data/text_data_base.py
  class TextDataBase (line 34) | class TextDataBase(DataBase): # pylint: disable=too-few-public-methods
    method __init__ (line 38) | def __init__(self, hparams):
    method default_hparams (line 42) | def default_hparams():
    method _make_batch (line 55) | def _make_batch(dataset, hparams, element_length_func,

FILE: texar_repo/texar/data/data_decoders.py
  function _append_token (line 41) | def _append_token(token):
  class ScalarDataDecoder (line 44) | class ScalarDataDecoder(data_decoder.DataDecoder):
    method __init__ (line 55) | def __init__(self, dtype=tf.int32, data_name="data"):
    method __call__ (line 61) | def __call__(self, data):
    method decode (line 65) | def decode(self, data, items):
    method list_items (line 87) | def list_items(self):
    method data_tensor_name (line 96) | def data_tensor_name(self):
  class TextDataDecoder (line 101) | class TextDataDecoder(data_decoder.DataDecoder):
    method __init__ (line 134) | def __init__(self,
    method __call__ (line 155) | def __call__(self, data):
    method decode (line 159) | def decode(self, data, items):
    method list_items (line 205) | def list_items(self):
    method text_tensor_name (line 216) | def text_tensor_name(self):
    method text_tensor_name (line 222) | def text_tensor_name(self, name):
    method length_tensor_name (line 226) | def length_tensor_name(self):
    method length_tensor_name (line 232) | def length_tensor_name(self, name):
    method text_id_tensor_name (line 236) | def text_id_tensor_name(self):
    method text_id_tensor_name (line 242) | def text_id_tensor_name(self, name):
    method added_length (line 246) | def added_length(self):
  class VarUttTextDataDecoder (line 251) | class VarUttTextDataDecoder(data_decoder.DataDecoder):
    method __init__ (line 292) | def __init__(self,
    method __call__ (line 319) | def __call__(self, data):
    method decode (line 323) | def decode(self, data, items): # pylint: disable=too-many-locals
    method list_items (line 409) | def list_items(self):
    method text_tensor_name (line 423) | def text_tensor_name(self):
    method text_tensor_name (line 429) | def text_tensor_name(self, name):
    method utterance_cnt_tensor_name (line 433) | def utterance_cnt_tensor_name(self):
    method length_tensor_name (line 439) | def length_tensor_name(self):
    method length_tensor_name (line 445) | def length_tensor_name(self, name):
    method text_id_tensor_name (line 449) | def text_id_tensor_name(self):
    method text_id_tensor_name (line 455) | def text_id_tensor_name(self, name):
    method added_length (line 459) | def added_length(self):

FILE: texar_repo/texar/data/data_utils.py
  function maybe_download (line 47) | def maybe_download(urls, path, filenames=None, extract=False):
  function _download (line 107) | def _download(url, filename, path):
  function _extract_google_drive_file_id (line 124) | def _extract_google_drive_file_id(url):
  function _download_from_google_drive (line 130) | def _download_from_google_drive(url, filename, path):
  function read_words (line 161) | def read_words(filename, newline_token=None):
  function make_vocab (line 188) | def make_vocab(filenames, max_vocab_size=-1, newline_token=None,
  function count_file_lines (line 248) | def count_file_lines(filenames):

FILE: texar_repo/texar/data/data_utils_test.py
  class CountFileLinesTest (line 19) | class CountFileLinesTest(tf.test.TestCase):
    method test_load_glove (line 23) | def test_load_glove(self):

FILE: texar_repo/texar/data/embedding.py
  function load_word2vec (line 36) | def load_word2vec(filename, vocab, word_vecs):
  function load_glove (line 76) | def load_glove(filename, vocab, word_vecs):
  class Embedding (line 107) | class Embedding(object):
    method __init__ (line 119) | def __init__(self, vocab, hparams=None):
    method default_hparams (line 148) | def default_hparams():
    method word_vecs (line 228) | def word_vecs(self):
    method vector_size (line 234) | def vector_size(self):

FILE: texar_repo/texar/data/embedding_test.py
  class EmbeddingTest (line 34) | class EmbeddingTest(tf.test.TestCase):
    method test_load_glove (line 38) | def test_load_glove(self):
    method test_load_word2vec (line 58) | def test_load_word2vec(self):
    method test_embedding (line 80) | def test_embedding(self):

FILE: texar_repo/texar/data/vocabulary.py
  class SpecialTokens (line 41) | class SpecialTokens(object):
  function _make_defaultdict (line 52) | def _make_defaultdict(keys, values, default_value):
  class Vocab (line 71) | class Vocab(object):
    method __init__ (line 94) | def __init__(self,
    method load (line 110) | def load(self, filename):
    method map_ids_to_tokens (line 174) | def map_ids_to_tokens(self, ids):
    method map_tokens_to_ids (line 187) | def map_tokens_to_ids(self, tokens):
    method map_ids_to_tokens_py (line 200) | def map_ids_to_tokens_py(self, ids):
    method map_tokens_to_ids_py (line 214) | def map_tokens_to_ids_py(self, tokens):
    method id_to_token_map (line 229) | def id_to_token_map(self):
    method token_to_id_map (line 236) | def token_to_id_map(self):
    method id_to_token_map_py (line 243) | def id_to_token_map_py(self):
    method token_to_id_map_py (line 250) | def token_to_id_map_py(self):
    method size (line 257) | def size(self):
    method bos_token (line 263) | def bos_token(self):
    method bos_token_id (line 269) | def bos_token_id(self):
    method eos_token (line 276) | def eos_token(self):
    method eos_token_id (line 282) | def eos_token_id(self):
    method unk_token (line 289) | def unk_token(self):
    method unk_token_id (line 295) | def unk_token_id(self):
    method pad_token (line 301) | def pad_token(self):
    method pad_token_id (line 308) | def pad_token_id(self):
    method special_tokens (line 314) | def special_tokens(self):

FILE: texar_repo/texar/data/vocabulary_test.py
  class VocabularyTest (line 19) | class VocabularyTest(tf.test.TestCase):
    method test_make_defaultdict (line 23) | def test_make_defaultdict(self):
    method test_vocab_construction (line 37) | def test_vocab_construction(self):

FILE: texar_repo/texar/evals/bleu.py
  function _get_ngrams (line 47) | def _get_ngrams(segment, max_order):
  function _maybe_str_to_list (line 66) | def _maybe_str_to_list(list_or_str):
  function _lowercase (line 71) | def _lowercase(str_list):
  function sentence_bleu (line 74) | def sentence_bleu(references, hypothesis, max_order=4, lowercase=False,
  function corpus_bleu (line 103) | def corpus_bleu(list_of_references, hypotheses, max_order=4, lowercase=F...

FILE: texar_repo/texar/evals/bleu_moses.py
  function _maybe_list_to_str (line 43) | def _maybe_list_to_str(list_or_str):
  function _parse_multi_bleu_ret (line 48) | def _parse_multi_bleu_ret(bleu_str, return_all=False):
  function sentence_bleu_moses (line 60) | def sentence_bleu_moses(references, hypothesis, lowercase=False,
  function corpus_bleu_moses (line 86) | def corpus_bleu_moses(list_of_references, hypotheses, lowercase=False,

FILE: texar_repo/texar/evals/bleu_test.py
  class BLEUTest (line 21) | class BLEUTest(tf.test.TestCase):
    method _test_sentence_bleu (line 25) | def _test_sentence_bleu(self, references, hypothesis, lowercase,
    method test_sentence_strings (line 37) | def test_sentence_strings(self):
    method test_sentence_list (line 46) | def test_sentence_list(self):
    method test_sentence_multi_references (line 57) | def test_sentence_multi_references(self):
    method test_sentence_numpy (line 67) | def test_sentence_numpy(self):
    method _test_corpus_bleu (line 80) | def _test_corpus_bleu(self, list_of_references, hypotheses, lowercase,
    method test_corpus_strings (line 104) | def test_corpus_strings(self):

FILE: texar_repo/texar/evals/metrics.py
  function accuracy (line 17) | def accuracy(labels, preds):
  function binary_clas_accuracy (line 31) | def binary_clas_accuracy(pos_preds=None, neg_preds=None):

FILE: texar_repo/texar/hyperparams.py
  function _type_name (line 31) | def _type_name(value):
  class HParams (line 34) | class HParams(object):
    method __init__ (line 151) | def __init__(self, hparams, default_hparams, allow_new_hparam=False):
    method _parse (line 162) | def _parse(hparams, # pylint: disable=too-many-branches, too-many-stat...
    method _parse_value (line 284) | def _parse_value(value, name=None):
    method __getattr__ (line 290) | def __getattr__(self, name):
    method __getitem__ (line 300) | def __getitem__(self, name):
    method __setattr__ (line 305) | def __setattr__(self, name, value):
    method items (line 315) | def items(self):
    method keys (line 320) | def keys(self):
    method __iter__ (line 325) | def __iter__(self):
    method __len__ (line 329) | def __len__(self):
    method __contains__ (line 332) | def __contains__(self, name):
    method __str__ (line 335) | def __str__(self):
    method get (line 341) | def get(self, name, default=None):
    method add_hparam (line 354) | def add_hparam(self, name, value):
    method todict (line 361) | def todict(self):

FILE: texar_repo/texar/hyperparams_test.py
  class HParamsTest (line 19) | class HParamsTest(tf.test.TestCase):
    method test_hparams (line 23) | def test_hparams(self):
    method test_typecheck (line 99) | def test_typecheck(self):
    method test_type_kwargs (line 119) | def test_type_kwargs(self):

FILE: texar_repo/texar/losses/adv_losses.py
  function binary_adversarial_losses (line 23) | def binary_adversarial_losses(real_data,

FILE: texar_repo/texar/losses/adv_losses_test.py
  class AdvLossesTest (line 14) | class AdvLossesTest(tf.test.TestCase):
    method test_binary_adversarial_losses (line 17) | def test_binary_adversarial_losses(self):

FILE: texar_repo/texar/losses/entropy.py
  function _get_entropy (line 34) | def _get_entropy(logits):
  function entropy_with_logits (line 40) | def entropy_with_logits(logits,
  function sequence_entropy_with_logits (line 116) | def sequence_entropy_with_logits(logits,

FILE: texar_repo/texar/losses/losses_utils.py
  function mask_and_reduce (line 38) | def mask_and_reduce(sequence,
  function reduce_batch_time (line 129) | def reduce_batch_time(sequence,
  function reduce_dimensions (line 166) | def reduce_dimensions(tensor, average_axes=None, sum_axes=None, keepdims...

FILE: texar_repo/texar/losses/mle_losses.py
  function sequence_softmax_cross_entropy (line 38) | def sequence_softmax_cross_entropy(labels,
  function sequence_sparse_softmax_cross_entropy (line 119) | def sequence_sparse_softmax_cross_entropy(labels,
  function sequence_sigmoid_cross_entropy (line 208) | def sequence_sigmoid_cross_entropy(labels,
  function binary_sigmoid_cross_entropy (line 306) | def binary_sigmoid_cross_entropy(pos_logits=None,
  function binary_sigmoid_cross_entropy_with_clas (line 384) | def binary_sigmoid_cross_entropy_with_clas(clas_fn,

FILE: texar_repo/texar/losses/mle_losses_test.py
  class MLELossesTest (line 20) | class MLELossesTest(tf.test.TestCase):
    method setUp (line 24) | def setUp(self):
    method _test_sequence_loss (line 40) | def _test_sequence_loss(self, loss_fn, labels, logits, sequence_length):
    method test_sequence_softmax_cross_entropy (line 75) | def test_sequence_softmax_cross_entropy(self):
    method test_sequence_sparse_softmax_cross_entropy (line 82) | def test_sequence_sparse_softmax_cross_entropy(self):
    method test_sequence_sigmoid_cross_entropy (line 89) | def test_sequence_sigmoid_cross_entropy(self):

FILE: texar_repo/texar/losses/pg_losses.py
  function pg_loss_with_logits (line 34) | def pg_loss_with_logits(actions,
  function pg_loss_with_log_probs (line 136) | def pg_loss_with_log_probs(log_probs,

FILE: texar_repo/texar/losses/rewards.py
  function discount_reward (line 37) | def discount_reward(reward,
  function _discount_reward_py_1d (line 122) | def _discount_reward_py_1d(reward, sequence_length, discount=1., dtype=N...
  function _discount_reward_tensor_1d (line 149) | def _discount_reward_tensor_1d(reward, sequence_length,
  function _discount_reward_py_2d (line 174) | def _discount_reward_py_2d(reward, sequence_length=None,
  function _discount_reward_tensor_2d (line 191) | def _discount_reward_tensor_2d(reward, sequence_length=None,

FILE: texar_repo/texar/losses/rewards_test.py
  class RewardTest (line 21) | class RewardTest(tf.test.TestCase):
    method test_discount_reward (line 25) | def test_discount_reward(self):
    method test_discount_reward_py_1d (line 80) | def test_discount_reward_py_1d(self):
    method test_discount_reward_tensor_1d (line 108) | def test_discount_reward_tensor_1d(self):
    method test_discount_reward_py_2d (line 139) | def test_discount_reward_py_2d(self):
    method test_discount_reward_tensor_2d (line 167) | def test_discount_reward_tensor_2d(self):

FILE: texar_repo/texar/losses/rl_losses.py
  function reinforce_loss (line 27) | def reinforce_loss(sample_fn,
  function reinforce_loss_with_MCtree (line 79) | def reinforce_loss_with_MCtree(sample_fn,   # pylint: disable=invalid-name

FILE: texar_repo/texar/models/model_base.py
  class ModelBase (line 30) | class ModelBase(object):
    method __init__ (line 43) | def __init__(self, hparams=None):
    method default_hparams (line 48) | def default_hparams():
    method __call__ (line 56) | def __call__(self, features, labels, params, mode, config=None):
    method _build (line 63) | def _build(self, features, labels, params, mode, config=None):
    method get_input_fn (line 70) | def get_input_fn(self, *args, **kwargs):
    method hparams (line 77) | def hparams(self):

FILE: texar_repo/texar/models/seq2seq/basic_seq2seq.py
  class BasicSeq2seq (line 35) | class BasicSeq2seq(Seq2seqBase):
    method __init__ (line 55) | def __init__(self, data_hparams, hparams=None):
    method default_hparams (line 59) | def default_hparams():
    method _build_decoder (line 71) | def _build_decoder(self):
    method _get_predictions (line 80) | def _get_predictions(self, decoder_results, features, labels, loss=None):
    method embed_source (line 97) | def embed_source(self, features, labels, mode):
    method embed_target (line 102) | def embed_target(self, features, labels, mode):
    method encode (line 107) | def encode(self, features, labels, mode):
    method _connect (line 119) | def _connect(self, encoder_results, features, labels, mode):
    method _decode_train (line 131) | def _decode_train(self, initial_state, encoder_results, features,
    method _decode_infer (line 140) | def _decode_infer(self, initial_state, encoder_results, features,
    method decode (line 165) | def decode(self, encoder_results, features, labels, mode):

FILE: texar_repo/texar/models/seq2seq/seq2seq_base.py
  class Seq2seqBase (line 39) | class Seq2seqBase(ModelBase):
    method __init__ (line 46) | def __init__(self, data_hparams, hparams=None):
    method default_hparams (line 61) | def default_hparams():
    method _build_vocab (line 162) | def _build_vocab(self):
    method _build_embedders (line 167) | def _build_embedders(self):
    method _build_encoder (line 192) | def _build_encoder(self):
    method _build_decoder (line 200) | def _build_decoder(self):
    method _build_connector (line 203) | def _build_connector(self):
    method get_loss (line 212) | def get_loss(self, decoder_results, features, labels):
    method _get_predictions (line 220) | def _get_predictions(self, decoder_results, features, labels, loss=None):
    method _get_train_op (line 223) | def _get_train_op(self, loss):
    method _get_eval_metric_ops (line 230) | def _get_eval_metric_ops(self, decoder_results, features, labels):
    method embed_source (line 233) | def embed_source(self, features, labels, mode):
    method embed_target (line 238) | def embed_target(self, features, labels, mode):
    method encode (line 243) | def encode(self, features, labels, mode):
    method _connect (line 248) | def _connect(self, encoder_results, features, labels, mode):
    method decode (line 253) | def decode(self, encoder_results, features, labels, mode):
    method _build (line 258) | def _build(self, features, labels, params, mode, config=None):
    method get_input_fn (line 290) | def get_input_fn(self, mode, hparams=None): #pylint:disable=arguments-...

FILE: texar_repo/texar/module_base.py
  class ModuleBase (line 32) | class ModuleBase(object):
    method __init__ (line 70) | def __init__(self, hparams=None):
    method default_hparams (line 79) | def default_hparams():
    method _build (line 94) | def _build(self, *args, **kwargs):
    method __call__ (line 106) | def __call__(self, *args, **kwargs):
    method _add_internal_trainable_variables (line 118) | def _add_internal_trainable_variables(self):  # pylint: disable=invali...
    method _add_trainable_variable (line 133) | def _add_trainable_variable(self, variable):
    method variable_scope (line 150) | def variable_scope(self):
    method name (line 156) | def name(self):
    method trainable_variables (line 162) | def trainable_variables(self):
    method hparams (line 173) | def hparams(self):

FILE: texar_repo/texar/modules/classifiers/classifier_base.py
  class ClassifierBase (line 28) | class ClassifierBase(ModuleBase):
    method __init__ (line 32) | def __init__(self, hparams=None):
    method default_hparams (line 36) | def default_hparams():
    method _build (line 43) | def _build(self, inputs, *args, **kwargs):

FILE: texar_repo/texar/modules/classifiers/conv_classifiers.py
  class Conv1DClassifier (line 36) | class Conv1DClassifier(ClassifierBase):
    method __init__ (line 62) | def __init__(self, hparams=None):
    method default_hparams (line 92) | def default_hparams():
    method _build (line 143) | def _build(self,    # pylint: disable=arguments-differ
    method trainable_variables (line 199) | def trainable_variables(self):
    method num_classes (line 210) | def num_classes(self):
    method nn (line 216) | def nn(self): # pylint: disable=invalid-name
    method has_layer (line 221) | def has_layer(self, layer_name):
    method layer_by_name (line 230) | def layer_by_name(self, layer_name):
    method layers_by_name (line 240) | def layers_by_name(self):
    method layers (line 246) | def layers(self):
    method layer_names (line 252) | def layer_names(self):
    method layer_outputs_by_name (line 257) | def layer_outputs_by_name(self, layer_name):
    method layer_outputs (line 267) | def layer_outputs(self):

FILE: texar_repo/texar/modules/classifiers/conv_classifiers_test.py
  class Conv1DClassifierTest (line 17) | class Conv1DClassifierTest(tf.test.TestCase):
    method test_classifier (line 21) | def test_classifier(self):

FILE: texar_repo/texar/modules/classifiers/rnn_classifiers.py
  class UnidirectionalRNNClassifier (line 49) | class UnidirectionalRNNClassifier(ClassifierBase):
    method __init__ (line 80) | def __init__(self,
    method default_hparams (line 121) | def default_hparams():
    method _build (line 190) | def _build(self,
    method num_classes (line 345) | def num_classes(self):

FILE: texar_repo/texar/modules/classifiers/rnn_classifiers_test.py
  class UnidirectionalRNNClassifierTest (line 20) | class UnidirectionalRNNClassifierTest(tf.test.TestCase):
    method test_trainable_variables (line 24) | def test_trainable_variables(self):
    method test_encode (line 46) | def test_encode(self):
    method test_binary (line 118) | def test_binary(self):

FILE: texar_repo/texar/modules/connectors/connector_base.py
  class ConnectorBase (line 28) | class ConnectorBase(ModuleBase):
    method __init__ (line 49) | def __init__(self, output_size, hparams=None):
    method default_hparams (line 54) | def default_hparams():
    method _build (line 61) | def _build(self, *args, **kwargs):
    method output_size (line 67) | def output_size(self):

FILE: texar_repo/texar/modules/connectors/connectors.py
  function _assert_same_size (line 44) | def _assert_same_size(outputs, output_size):
  function _get_tensor_depth (line 61) | def _get_tensor_depth(x):
  function _mlp_transform (line 70) | def _mlp_transform(inputs, output_size, activation_fn=tf.identity):
  class ConstantConnector (line 124) | class ConstantConnector(ConnectorBase):
    method __init__ (line 155) | def __init__(self, output_size, hparams=None):
    method default_hparams (line 159) | def default_hparams():
    method _build (line 183) | def _build(self, batch_size, value=None):
  class ForwardConnector (line 209) | class ForwardConnector(ConnectorBase):
    method __init__ (line 251) | def __init__(self, output_size, hparams=None):
    method default_hparams (line 255) | def default_hparams():
    method _build (line 273) | def _build(self, inputs):
  class MLPTransformConnector (line 300) | class MLPTransformConnector(ConnectorBase):
    method __init__ (line 351) | def __init__(self, output_size, hparams=None):
    method default_hparams (line 355) | def default_hparams():
    method _build (line 380) | def _build(self, inputs):
  class ReparameterizedStochasticConnector (line 405) | class ReparameterizedStochasticConnector(ConnectorBase):
    method __init__ (line 458) | def __init__(self, output_size, hparams=None):
    method default_hparams (line 462) | def default_hparams():
    method _build (line 487) | def _build(self,
  class StochasticConnector (line 562) | class StochasticConnector(ConnectorBase):
    method __init__ (line 587) | def __init__(self, output_size, hparams=None):
    method default_hparams (line 591) | def default_hparams():
    method _build (line 616) | def _build(self,

FILE: texar_repo/texar/modules/connectors/connectors_test.py
  class TestConnectors (line 23) | class TestConnectors(tf.test.TestCase):
    method setUp (line 27) | def setUp(self):
    method test_constant_connector (line 34) | def test_constant_connector(self):
    method test_forward_connector (line 54) | def test_forward_connector(self):
    method test_mlp_transform_connector (line 61) | def test_mlp_transform_connector(self):
    method test_reparameterized_stochastic_connector (line 75) | def test_reparameterized_stochastic_connector(self):

FILE: texar_repo/texar/modules/decoders/beam_search_decode.py
  function _get_initial_state (line 36) | def _get_initial_state(initial_state,
  function beam_search_decode (line 62) | def beam_search_decode(decoder_or_cell,

FILE: texar_repo/texar/modules/decoders/beam_search_decode_test.py
  class BeamSearchDecodeTest (line 23) | class BeamSearchDecodeTest(tf.test.TestCase):
    method setUp (line 28) | def setUp(self):
    method _test_beam_search (line 45) | def _test_beam_search(
    method test_basic_rnn_decoder_beam_search (line 142) | def test_basic_rnn_decoder_beam_search(self):
    method test_basic_rnn_decoder_given_initial_state (line 159) | def test_basic_rnn_decoder_given_initial_state(self):
    method test_attention_decoder_beam_search (line 183) | def test_attention_decoder_beam_search(self):
    method test_attention_decoder_given_initial_state (line 205) | def test_attention_decoder_given_initial_state(self):

FILE: texar_repo/texar/modules/decoders/rnn_decoder_base.py
  class RNNDecoderBase (line 44) | class RNNDecoderBase(ModuleBase, TFDecoder):
    method __init__ (line 54) | def __init__(self,
    method default_hparams (line 92) | def default_hparams():
    method _build (line 109) | def _build(self,
    method _get_beam_search_cell (line 432) | def _get_beam_search_cell(self, **kwargs):
    method _rnn_output_size (line 436) | def _rnn_output_size(self):
    method batch_size (line 455) | def batch_size(self):
    method output_size (line 459) | def output_size(self):
    method output_dtype (line 465) | def output_dtype(self):
    method initialize (line 470) | def initialize(self, name=None):
    method step (line 475) | def step(self, time, inputs, state, name=None):
    method finalize (line 480) | def finalize(self, outputs, final_state, sequence_lengths):
    method cell (line 486) | def cell(self):
    method zero_state (line 491) | def zero_state(self, batch_size, dtype):
    method state_size (line 499) | def state_size(self):
    method vocab_size (line 506) | def vocab_size(self):
    method output_layer (line 512) | def output_layer(self):

FILE: texar_repo/texar/modules/decoders/rnn_decoder_helpers.py
  function default_helper_train_hparams (line 43) | def default_helper_train_hparams():
  function default_helper_infer_hparams (line 70) | def default_helper_infer_hparams():
  function get_helper (line 98) | def get_helper(helper_type,
  function _get_training_helper (line 142) | def _get_training_helper( #pylint: disable=invalid-name
  class SoftmaxEmbeddingHelper (line 188) | class SoftmaxEmbeddingHelper(TFHelper):
    method __init__ (line 216) | def __init__(self, embedding, start_tokens, end_token, tau,
    method batch_size (line 240) | def batch_size(self):
    method sample_ids_dtype (line 244) | def sample_ids_dtype(self):
    method sample_ids_shape (line 248) | def sample_ids_shape(self):
    method initialize (line 251) | def initialize(self, name=None):
    method sample (line 255) | def sample(self, time, outputs, state, name=None):
    method next_inputs (line 262) | def next_inputs(self, time, outputs, state, sample_ids, name=None):
  class GumbelSoftmaxEmbeddingHelper (line 274) | class GumbelSoftmaxEmbeddingHelper(SoftmaxEmbeddingHelper):
    method __init__ (line 308) | def __init__(self, embedding, start_tokens, end_token, tau,
    method sample (line 314) | def sample(self, time, outputs, state, name=None):

FILE: texar_repo/texar/modules/decoders/rnn_decoders.py
  class BasicRNNDecoderOutput (line 43) | class BasicRNNDecoderOutput(
  class AttentionRNNDecoderOutput (line 71) | class AttentionRNNDecoderOutput(
  class BasicRNNDecoder (line 104) | class BasicRNNDecoder(RNNDecoderBase):
    method __init__ (line 173) | def __init__(self,
    method default_hparams (line 183) | def default_hparams():
    method initialize (line 243) | def initialize(self, name=None):
    method step (line 246) | def step(self, time, inputs, state, name=None):
    method finalize (line 259) | def finalize(self, outputs, final_state, sequence_lengths):
    method output_size (line 263) | def output_size(self):
    method output_dtype (line 272) | def output_dtype(self):
  class AttentionRNNDecoder (line 286) | class AttentionRNNDecoder(RNNDecoderBase):
    method __init__ (line 350) | def __init__(self,
    method default_hparams (line 408) | def default_hparams():
    method _get_beam_search_cell (line 533) | def _get_beam_search_cell(self, beam_width):
    method initialize (line 562) | def initialize(self, name=None):
    method step (line 573) | def step(self, time, inputs, state, name=None):
    method finalize (line 593) | def finalize(self, outputs, final_state, sequence_lengths):
    method _alignments_size (line 596) | def _alignments_size(self):
    method output_size (line 613) | def output_size(self):
    method output_dtype (line 622) | def output_dtype(self):
    method zero_state (line 639) | def zero_state(self, batch_size, dtype):
    method wrapper_zero_state (line 645) | def wrapper_zero_state(self, batch_size, dtype):
    method state_size (line 652) | def state_size(self):
    method wrapper_state_size (line 660) | def wrapper_state_size(self):

FILE: texar_repo/texar/modules/decoders/rnn_decoders_test.py
  class BasicRNNDecoderTest (line 24) | class BasicRNNDecoderTest(tf.test.TestCase):
    method setUp (line 28) | def setUp(self):
    method _test_outputs (line 40) | def _test_outputs(self, decoder, outputs, final_state, sequence_lengths,
    method test_decode_train (line 66) | def test_decode_train(self):
    method test_decode_train_with_tf (line 101) | def test_decode_train_with_tf(self):
    method test_decode_infer (line 166) | def test_decode_infer(self):
  class AttentionRNNDecoderTest (line 204) | class AttentionRNNDecoderTest(tf.test.TestCase):
    method setUp (line 208) | def setUp(self):
    method test_decode_train (line 223) | def test_decode_train(self):
    method test_decode_infer (line 276) | def test_decode_infer(self):
    method test_beam_search_cell (line 326) | def test_beam_search_cell(self):

FILE: texar_repo/texar/modules/decoders/transformer_decoders.py
  class TransformerDecoderOutput (line 49) | class TransformerDecoderOutput(
  class TransformerDecoder (line 62) | class TransformerDecoder(ModuleBase):
    method __init__ (line 83) | def __init__(self, embedding, hparams=None):
    method default_hparams (line 141) | def default_hparams():
    method _prepare_tokens_to_embeds (line 248) | def _prepare_tokens_to_embeds(self, tokens):
    method _symbols_to_logits_fn (line 253) | def _symbols_to_logits_fn(self, embedding_fn, max_length):
    method _build (line 286) | def _build(self,    # pylint: disable=arguments-differ
    method _self_attention_stack (line 488) | def _self_attention_stack(self,
    method _build_output_layer (line 551) | def _build_output_layer(self, dim):
    method _init_cache (line 577) | def _init_cache(self, memory, memory_attention_bias):
    method _infer_decoding (line 593) | def _infer_decoding(self,
    method _beam_decode (line 672) | def _beam_decode(self,

FILE: texar_repo/texar/modules/decoders/transformer_decoders_test.py
  class TransformerDecoderTest (line 19) | class TransformerDecoderTest(tf.test.TestCase):
    method setUp (line 23) | def setUp(self):
    method test_train (line 43) | def test_train(self):
    method test_infer_greedy (line 65) | def test_infer_greedy(self):
    method test_infer_sample (line 85) | def test_infer_sample(self):
    method test_beam_search (line 106) | def test_beam_search(self):

FILE: texar_repo/texar/modules/embedders/embedder_base.py
  class EmbedderBase (line 33) | class EmbedderBase(ModuleBase):
    method __init__ (line 45) | def __init__(self, num_embeds=None, hparams=None):
    method _init_parameterized_embedding (line 51) | def _init_parameterized_embedding(self, init_value, num_embeds, hparams):
    method _get_dropout_layer (line 64) | def _get_dropout_layer(self, hparams, ids_rank=None, dropout_input=None,
    method default_hparams (line 93) | def default_hparams():
    method _build (line 106) | def _build(self, *args, **kwargs):
    method num_embeds (line 110) | def num_embeds(self):

FILE: texar_repo/texar/modules/embedders/embedder_utils.py
  function default_embedding_hparams (line 32) | def default_embedding_hparams():
  function get_embedding (line 168) | def get_embedding(hparams=None,
  function soft_embedding_lookup (line 218) | def soft_embedding_lookup(embedding, soft_ids):

FILE: texar_repo/texar/modules/embedders/embedder_utils_test.py
  class GetEmbeddingTest (line 17) | class GetEmbeddingTest(tf.test.TestCase):
    method test_get_embedding (line 20) | def test_get_embedding(self):

FILE: texar_repo/texar/modules/embedders/embedders.py
  class WordEmbedder (line 33) | class WordEmbedder(EmbedderBase):
    method __init__ (line 96) | def __init__(self, init_value=None, vocab_size=None, hparams=None):
    method default_hparams (line 117) | def default_hparams():
    method _build (line 191) | def _build(self, ids=None, soft_ids=None, mode=None, **kwargs):
    method embedding (line 256) | def embedding(self):
    method dim (line 262) | def dim(self):
    method vocab_size (line 268) | def vocab_size(self):

FILE: texar_repo/texar/modules/embedders/embedders_test.py
  class EmbedderTest (line 21) | class EmbedderTest(tf.test.TestCase):
    method _test_word_embedder (line 25) | def _test_word_embedder(self, hparams):
    method _test_position_embedder (line 69) | def _test_position_embedder(self, hparams):
    method test_embedder (line 101) | def test_embedder(self):
    method test_embedder_multi_calls (line 161) | def test_embedder_multi_calls(self):
    method test_word_embedder_soft_ids (line 185) | def test_word_embedder_soft_ids(self):

FILE: texar_repo/texar/modules/embedders/position_embedders.py
  class PositionEmbedder (line 38) | class PositionEmbedder(EmbedderBase):
    method __init__ (line 66) | def __init__(self, init_value=None, position_size=None, hparams=None):
    method default_hparams (line 87) | def default_hparams():
    method _build (line 121) | def _build(self, positions=None, sequence_length=None, mode=None, **kw...
    method embedding (line 205) | def embedding(self):
    method dim (line 211) | def dim(self):
    method position_size (line 217) | def position_size(self):
  class SinusoidsPositionEmbedder (line 223) | class SinusoidsPositionEmbedder(EmbedderBase):
    method __init__ (line 246) | def __init__(self, hparams=None):
    method default_hparams (line 249) | def default_hparams(self):
    method _build (line 272) | def _build(self, positions):

FILE: texar_repo/texar/modules/encoders/conv_encoders.py
  class Conv1DEncoder (line 29) | class Conv1DEncoder(Conv1DNetwork, EncoderBase):
    method __init__ (line 38) | def __init__(self, hparams=None): # pylint: disable=super-init-not-called
    method default_hparams (line 42) | def default_hparams():

FILE: texar_repo/texar/modules/encoders/conv_encoders_test.py
  class Conv1DEncoderTest (line 17) | class Conv1DEncoderTest(tf.test.TestCase):
    method test_encode (line 21) | def test_encode(self):
    method test_unknown_seq_length (line 66) | def test_unknown_seq_length(self):

FILE: texar_repo/texar/modules/encoders/encoder_base.py
  class EncoderBase (line 28) | class EncoderBase(ModuleBase):
    method __init__ (line 32) | def __init__(self, hparams=None):
    method default_hparams (line 36) | def default_hparams():
    method _build (line 43) | def _build(self, inputs, *args, **kwargs):

FILE: texar_repo/texar/modules/encoders/hierarchical_encoders.py
  class HierarchicalRNNEncoder (line 37) | class HierarchicalRNNEncoder(EncoderBase):
    method __init__ (line 66) | def __init__(self, encoder_major=None, encoder_minor=None,
    method default_hparams (line 103) | def default_hparams():
    method _build (line 170) | def _build(self,
    method tile_initial_state_minor (line 290) | def tile_initial_state_minor(initial_state, order, inputs_shape):
    method _get_flatten_order (line 323) | def _get_flatten_order(order, kwargs_minor, kwargs_major, shape):
    method flatten (line 350) | def flatten(x):
    method encoder_major (line 368) | def encoder_major(self):
    method encoder_minor (line 374) | def encoder_minor(self):

FILE: texar_repo/texar/modules/encoders/hierarchical_encoders_test.py
  class HierarchicalRNNEncoderTest (line 17) | class HierarchicalRNNEncoderTest(tf.test.TestCase):
    method test_trainable_variables (line 21) | def test_trainable_variables(self):
    method test_encode (line 36) | def test_encode(self):
    method test_order (line 57) | def test_order(self):
    method test_depack (line 80) | def test_depack(self):
    method test_encoder_minor_as_birnn (line 110) | def test_encoder_minor_as_birnn(self):

FILE: texar_repo/texar/modules/encoders/multihead_attention.py
  class MultiheadAttentionEncoder (line 36) | class MultiheadAttentionEncoder(EncoderBase):
    method __init__ (line 48) | def __init__(self, hparams=None):
    method default_hparams (line 70) | def default_hparams():
    method _build (line 121) | def _build(self, queries, memory, memory_attention_bias,
    method _split_heads (line 200) | def _split_heads(self, x):
    method _combine_heads (line 211) | def _combine_heads(self, x):

FILE: texar_repo/texar/modules/encoders/rnn_encoders.py
  function _default_output_layer_hparams (line 44) | def _default_output_layer_hparams():
  function _build_dense_output_layer (line 58) | def _build_dense_output_layer(hparams):
  function _forward_single_output_layer (line 94) | def _forward_single_output_layer(inputs, input_size, output_layer):
  function _apply_dropout (line 115) | def _apply_dropout(inputs, time_major, hparams, training):
  function _forward_output_layers (line 131) | def _forward_output_layers(inputs, input_size, output_layer, time_major,
  function _apply_rnn_encoder_output_layer (line 183) | def _apply_rnn_encoder_output_layer(output_layer, time_major, hparams, m...
  class RNNEncoderBase (line 201) | class RNNEncoderBase(EncoderBase):
    method __init__ (line 211) | def __init__(self, hparams=None):
    method default_hparams (line 215) | def default_hparams():
    method _build (line 228) | def _build(self, inputs, *args, **kwargs):
  class UnidirectionalRNNEncoder (line 242) | class UnidirectionalRNNEncoder(RNNEncoderBase):
    method __init__ (line 280) | def __init__(self,
    method default_hparams (line 306) | def default_hparams():
    method _build (line 409) | def _build(self,
    method cell (line 532) | def cell(self):
    method state_size (line 538) | def state_size(self):
    method output_layer (line 546) | def output_layer(self):
  class BidirectionalRNNEncoder (line 551) | class BidirectionalRNNEncoder(RNNEncoderBase):
    method __init__ (line 597) | def __init__(self,
    method default_hparams (line 647) | def default_hparams():
    method _build (line 730) | def _build(self,
    method cell_fw (line 878) | def cell_fw(self):
    method cell_bw (line 884) | def cell_bw(self):
    method state_size_fw (line 890) | def state_size_fw(self):
    method state_size_bw (line 898) | def state_size_bw(self):
    method output_layer_fw (line 906) | def output_layer_fw(self):
    method output_layer_bw (line 912) | def output_layer_bw(self):

FILE: texar_repo/texar/modules/encoders/rnn_encoders_test.py
  class UnidirectionalRNNEncoderTest (line 20) | class UnidirectionalRNNEncoderTest(tf.test.TestCase):
    method test_trainable_variables (line 24) | def test_trainable_variables(self):
    method test_encode (line 64) | def test_encode(self):
    method test_encode_with_embedder (line 113) | def test_encode_with_embedder(self):
  class BidirectionalRNNEncoderTest (line 129) | class BidirectionalRNNEncoderTest(tf.test.TestCase):
    method test_trainable_variables (line 133) | def test_trainable_variables(self):
    method test_encode (line 178) | def test_encode(self):

FILE: texar_repo/texar/modules/encoders/transformer_encoders.py
  function default_transformer_poswise_net_hparams (line 42) | def default_transformer_poswise_net_hparams(output_dim=512):
  class TransformerEncoder (line 114) | class TransformerEncoder(EncoderBase):
    method __init__ (line 128) | def __init__(self, hparams=None):
    method default_hparams (line 176) | def default_hparams():
    method _build (line 292) | def _build(self, inputs, sequence_length, mode=None):

FILE: texar_repo/texar/modules/memory/embed_fns.py
  function default_memnet_embed_fn_hparams (line 29) | def default_memnet_embed_fn_hparams():

FILE: texar_repo/texar/modules/memory/memory_network.py
  class MemNetSingleLayer (line 38) | class MemNetSingleLayer(ModuleBase):
    method __init__ (line 49) | def __init__(self, H=None, hparams=None):
    method default_hparams (line 55) | def default_hparams():
    method _build (line 73) | def _build(self, u, m, c, **kwargs):
  class MemNetBase (line 108) | class MemNetBase(ModuleBase):
    method __init__ (line 150) | def __init__(self,
    method _build_embed_fn (line 173) | def _build_embed_fn(self, input_embed_fn, output_embed_fn, query_embed...
    method get_default_embed_fn (line 207) | def get_default_embed_fn(self, memory_size, embed_fn_hparams):
    method default_hparams (line 303) | def default_hparams():
    method _build (line 376) | def _build(self, memory, query, **kwargs):
    method memory_size (line 380) | def memory_size(self):
    method raw_memory_dim (line 386) | def raw_memory_dim(self):
    method memory_dim (line 392) | def memory_dim(self):
  class MemNetRNNLike (line 398) | class MemNetRNNLike(MemNetBase):
    method __init__ (line 445) | def __init__(self,
    method default_hparams (line 465) | def default_hparams():
    method _build (line 530) | def _build(self, memory=None, query=None, soft_memory=None, soft_query...

FILE: texar_repo/texar/modules/memory/memory_network_test.py
  class MemNetRNNLikeTest (line 17) | class MemNetRNNLikeTest(tf.test.TestCase):
    method _test_memory_dim (line 21) | def _test_memory_dim(self, combine_mode='add', soft_memory=False,
    method test_memory_dim (line 87) | def test_memory_dim(self):

FILE: texar_repo/texar/modules/networks/conv_networks.py
  function _to_list (line 38) | def _to_list(value, name=None, list_length=None):
  class Conv1DNetwork (line 56) | class Conv1DNetwork(FeedForwardNetworkBase):
    method __init__ (line 88) | def __init__(self, hparams=None):
    method default_hparams (line 96) | def default_hparams():
    method _build_pool_hparams (line 301) | def _build_pool_hparams(self):
    method _build_conv1d_hparams (line 329) | def _build_conv1d_hparams(self, pool_hparams):
    method _build_dense_hparams (line 388) | def _build_dense_hparams(self):
    method _build_layer_hparams (line 417) | def _build_layer_hparams(self):
    method _build (line 453) | def _build(self,    # pylint: disable=arguments-differ

FILE: texar_repo/texar/modules/networks/conv_networks_test.py
  class Conv1DNetworkTest (line 17) | class Conv1DNetworkTest(tf.test.TestCase):
    method test_feedforward (line 21) | def test_feedforward(self):
    method test_unknown_seq_length (line 66) | def test_unknown_seq_length(self):
    method test_mask_input (line 115) | def test_mask_input(self):

FILE: texar_repo/texar/modules/networks/network_base.py
  function _build_layers (line 38) | def _build_layers(network, layers=None, layer_hparams=None):
  class FeedForwardNetworkBase (line 68) | class FeedForwardNetworkBase(ModuleBase):
    method __init__ (line 80) | def __init__(self, hparams=None):
    method default_hparams (line 90) | def default_hparams():
    method _build (line 103) | def _build(self, inputs, mode=None):
    method append_layer (line 141) | def append_layer(self, layer):
    method has_layer (line 162) | def has_layer(self, layer_name):
    method layer_by_name (line 171) | def layer_by_name(self, layer_name):
    method layers_by_name (line 181) | def layers_by_name(self):
    method layers (line 187) | def layers(self):
    method layer_names (line 193) | def layer_names(self):
    method layer_outputs_by_name (line 198) | def layer_outputs_by_name(self, layer_name):
    method layer_outputs (line 208) | def layer_outputs(self):

FILE: texar_repo/texar/modules/networks/networks.py
  class FeedForwardNetwork (line 31) | class FeedForwardNetwork(FeedForwardNetworkBase):
    method __init__ (line 63) | def __init__(self, layers=None, hparams=None):
    method default_hparams (line 71) | def default_hparams():

FILE: texar_repo/texar/modules/networks/networks_test.py
  class FeedForwardNetworkTest (line 15) | class FeedForwardNetworkTest(tf.test.TestCase):
    method test_feedforward (line 20) | def test_feedforward(self):

FILE: texar_repo/texar/modules/policies/policy_nets.py
  class PolicyNetBase (line 37) | class PolicyNetBase(ModuleBase):
    method __init__ (line 56) | def __init__(self,
    method default_hparams (line 66) | def default_hparams():
    method _build_network (line 133) | def _build_network(self, network, kwargs):
    method _build (line 144) | def _build(self, inputs, mode=None): # pylint: disable=arguments-differ
    method network (line 148) | def network(self):
  class CategoricalPolicyNet (line 155) | class CategoricalPolicyNet(PolicyNetBase):
    method __init__ (line 184) | def __init__(self,
    method default_hparams (line 199) | def default_hparams():
    method _append_output_layer (line 263) | def _append_output_layer(self):
    method _build (line 278) | def _build(self, inputs, mode=None):
    method action_space (line 327) | def action_space(self):

FILE: texar_repo/texar/modules/policies/policy_nets_test.py
  class CategoricalPolicyNetTest (line 15) | class CategoricalPolicyNetTest(tf.test.TestCase):
    method test_categorical_policy (line 19) | def test_categorical_policy(self):

FILE: texar_repo/texar/modules/qnets/qnets.py
  class QNetBase (line 35) | class QNetBase(ModuleBase):
    method __init__ (line 55) | def __init__(self,
    method default_hparams (line 65) | def default_hparams():
    method _build_network (line 126) | def _build_network(self, network, kwargs):
    method _build (line 137) | def _build(self, inputs, mode=None): # pylint: disable=arguments-differ
    method network (line 141) | def network(self):
  class CategoricalQNet (line 147) | class CategoricalQNet(QNetBase):
    method __init__ (line 172) | def __init__(self,
    method default_hparams (line 187) | def default_hparams():
    method _append_output_layer (line 232) | def _append_output_layer(self):
    method _build (line 246) | def _build(self, inputs, mode=None):
    method action_space (line 277) | def action_space(self):

FILE: texar_repo/texar/run/executor.py
  class Executor (line 31) | class Executor(object):
    method __init__ (line 75) | def __init__(self,
    method _get_train_spec (line 97) | def _get_train_spec(self, max_steps=None):
    method _get_eval_spec (line 109) | def _get_eval_spec(self, steps):
    method train (line 121) | def train(self, max_steps=None):
    method evaluate (line 137) | def evaluate(self, steps=None, checkpoint_path=None):
    method train_and_evaluate (line 158) | def train_and_evaluate(self, max_train_steps=None, eval_steps=None):

FILE: texar_repo/texar/run/executor_test.py
  class ExecutorTest (line 20) | class ExecutorTest(tf.test.TestCase):
    method setUp (line 24) | def setUp(self):
    method test_execute_seq2seq (line 60) | def test_execute_seq2seq(self):

FILE: texar_repo/texar/utils/average_recorder.py
  class _SingleAverageRecorder (line 31) | class _SingleAverageRecorder(object):
    method __init__ (line 41) | def __init__(self, size=None, name=None):
    method add (line 51) | def add(self, record, weight=None):
    method avg (line 80) | def avg(self):
    method reset (line 87) | def reset(self):
    method to_str (line 95) | def to_str(self, precision=None):
    method name (line 119) | def name(self):
  class AverageRecorder (line 124) | class AverageRecorder(object):
    method __init__ (line 160) | def __init__(self, size=None):
    method _to_dict (line 168) | def _to_dict(self, record):
    method add (line 177) | def add(self, record, weight=None):
    method avg (line 228) | def avg(self, id_or_name=None):
    method reset (line 267) | def reset(self, id_or_name=None):
    method to_str (line 285) | def to_str(self, precision=None, delimiter=' '):

FILE: texar_repo/texar/utils/average_recorder_test.py
  class AverageRecorderTest (line 14) | class AverageRecorderTest(tf.test.TestCase):
    method test_single_average_recoder (line 18) | def test_single_average_recoder(self):
    method test_average_recorder (line 49) | def test_average_recorder(self):

FILE: texar_repo/texar/utils/beam_search.py
  function _merge_beam_dim (line 35) | def _merge_beam_dim(tensor):
  function _unmerge_beam_dim (line 50) | def _unmerge_beam_dim(tensor, batch_size, beam_size):
  function _expand_to_beam_size (line 66) | def _expand_to_beam_size(tensor, beam_size):
  function get_state_shape_invariants (line 83) | def get_state_shape_invariants(tensor):
  function log_prob_from_logits (line 91) | def log_prob_from_logits(logits):
  function compute_batch_indices (line 95) | def compute_batch_indices(batch_size, beam_size):
  function compute_topk_scores_and_seq (line 114) | def compute_topk_scores_and_seq(sequences, scores, scores_to_gather, flags,
  function beam_search (line 186) | def beam_search(symbols_to_logits_fn,

FILE: texar_repo/texar/utils/dtypes.py
  function get_tf_dtype (line 39) | def get_tf_dtype(dtype): # pylint: disable=too-many-return-statements
  function is_callable (line 75) | def is_callable(x):
  function is_str (line 84) | def is_str(x):
  function is_placeholder (line 90) | def is_placeholder(x):
  function maybe_hparams_to_dict (line 99) | def maybe_hparams_to_dict(hparams):
  function _maybe_list_to_array (line 110) | def _maybe_list_to_array(str_list, dtype_as):
  function compat_as_text (line 118) | def compat_as_text(str_):

FILE: texar_repo/texar/utils/exceptions.py
  class TexarError (line 27) | class TexarError(Exception):

FILE: texar_repo/texar/utils/mode.py
  function maybe_global_mode (line 37) | def maybe_global_mode(mode):
  function is_train_mode (line 46) | def is_train_mode(mode):
  function is_eval_mode (line 56) | def is_eval_mode(mode):
  function is_predict_mode (line 66) | def is_predict_mode(mode):
  function is_train_mode_py (line 76) | def is_train_mode_py(mode, default=True):
  function is_eval_mode_py (line 95) | def is_eval_mode_py(mode, default=False):
  function is_predict_mode_py (line 114) | def is_predict_mode_py(mode, default=False):
  function switch_dropout (line 133) | def switch_dropout(dropout_keep_prob, mode=None):

FILE: texar_repo/texar/utils/mode_test.py
  class UtilsTest (line 16) | class UtilsTest(tf.test.TestCase):
    method test_mode (line 20) | def test_mode(self):

FILE: texar_repo/texar/utils/shapes.py
  function transpose_batch_time (line 44) | def transpose_batch_time(inputs):
  function get_batch_size (line 62) | def get_batch_size(tensor):
  function get_rank (line 69) | def get_rank(tensor):
  function mask_sequences (line 91) | def mask_sequences(sequence,
  function _mask_sequences_tensor (line 138) | def _mask_sequences_tensor(sequence,
  function _mask_sequences_py (line 191) | def _mask_sequences_py(sequence,
  function flatten (line 248) | def flatten(tensor, preserve_dims, flattened_dim=None):
  function shape_list (line 276) | def shape_list(x):
  function pad_and_concat (line 301) | def pad_and_concat(values, axis, rank=None, pad_axis=None,

FILE: texar_repo/texar/utils/shapes_test.py
  class ShapesTest (line 17) | class ShapesTest(tf.test.TestCase):
    method test_mask_sequences (line 21) | def test_mask_sequences(self):
    method test_pad_and_concat (line 32) | def test_pad_and_concat(self):

FILE: texar_repo/texar/utils/transformer_attentions.py
  function attention_bias_lower_triangle (line 36) | def attention_bias_lower_triangle(length, bias_value=-1e18):
  function attention_bias_local (line 48) | def attention_bias_local(length, max_backward, max_forward, bias_value=-...
  function attention_bias_ignore_padding (line 74) | def attention_bias_ignore_padding(memory_padding, bias_value=-1e18):
  function _ones_matrix_band_part (line 88) | def _ones_matrix_band_part(rows, cols, num_lower, num_upper,

FILE: texar_repo/texar/utils/transformer_utils.py
  class PadRemover (line 30) | class PadRemover(object):
    method __init__ (line 52) | def __init__(self, pad_mask):
    method remove (line 74) | def remove(self, x):
    method restore (line 96) | def restore(self, x):
  function embedding_to_padding (line 115) | def embedding_to_padding(emb):
  function smoothing_cross_entropy (line 129) | def smoothing_cross_entropy(logits,

FILE: texar_repo/texar/utils/utils.py
  function _expand_name (line 81) | def _expand_name(name):
  function _inspect_getargspec (line 89) | def _inspect_getargspec(fn):
  function get_args (line 98) | def get_args(fn):
  function get_default_arg_values (line 118) | def get_default_arg_values(fn):
  function check_or_get_class (line 137) | def check_or_get_class(class_or_name, module_path=None, superclass=None):
  function get_class (line 168) | def get_class(class_name, module_paths=None):
  function check_or_get_instance (line 206) | def check_or_get_instance(ins_or_class_or_name, kwargs, module_paths=None,
  function get_instance (line 244) | def get_instance(class_or_name, kwargs, module_paths=None):
  function check_or_get_instance_with_redundant_kwargs (line 283) | def check_or_get_instance_with_redundant_kwargs(
  function get_instance_with_redundant_kwargs (line 323) | def get_instance_with_redundant_kwargs(
  function get_function (line 360) | def get_function(fn_or_name, module_paths=None):
  function call_function_with_redundant_kwargs (line 396) | def call_function_with_redundant_kwargs(fn, kwargs):
  function get_instance_kwargs (line 428) | def get_instance_kwargs(kwargs, hparams):
  function dict_patch (line 454) | def dict_patch(tgt_dict, src_dict):
  function dict_lookup (line 478) | def dict_lookup(dict_, keys, default=None):
  function dict_fetch (line 498) | def dict_fetch(src_dict, tgt_dict_or_keys):
  function dict_pop (line 526) | def dict_pop(dict_, pop_keys, default=None):
  function flatten_dict (line 544) | def flatten_dict(dict_, parent_key="", sep="."):
  function default_str (line 573) | def default_str(str_, default_str):
  function uniquify_str (line 589) | def uniquify_str(str_, str_set):
  function _recur_split (line 623) | def _recur_split(s, dtype_as):
  function strip_token (line 633) | def strip_token(str_, token, is_token_list=False, compat=True):
  function strip_eos (line 694) | def strip_eos(str_, eos_token='<EOS>', is_token_list=False, compat=True):
  function strip_bos (line 741) | def strip_bos(str_, bos_token='<BOS>', is_token_list=False, compat=True):
  function strip_special_tokens (line 790) | def strip_special_tokens(str_, strip_pad='<PAD>', strip_bos='<BOS>',
  function str_join (line 853) | def str_join(tokens, sep=' ', compat=True):
  function map_ids_to_strs (line 882) | def map_ids_to_strs(ids, vocab, join=True, strip_pad='<PAD>',
  function ceildiv (line 946) | def ceildiv(a, b):
  function straight_through (line 960) | def straight_through(fw_tensor, bw_tensor):

FILE: texar_repo/texar/utils/utils_io.py
  function _load_config_python (line 65) | def _load_config_python(fname):
  function _load_config_yaml (line 75) | def _load_config_yaml(fname):
  function load_config_single (line 80) | def load_config_single(fname, config=None):
  function load_config (line 114) | def load_config(config_path, config=None):
  function write_paired_text (line 155) | def write_paired_text(src, tgt, fname, append=False, mode='h', sep='\t',
  function maybe_create_dir (line 223) | def maybe_create_dir(dirname):
  function get_files (line 232) | def get_files(file_paths):

FILE: texar_repo/texar/utils/utils_test.py
  class UtilsTest (line 21) | class UtilsTest(tf.test.TestCase):
    method test_dict_patch (line 25) | def test_dict_patch(self):
    method test_strip_token (line 51) | def test_strip_token(self):
    method test_strip_bos (line 74) | def test_strip_bos(self):
    method test_strip_eos (line 88) | def test_strip_eos(self):
    method test_strip_special_tokens (line 101) | def test_strip_special_tokens(self):
    method test_str_join (line 114) | def test_str_join(self):
    method test_uniquify_str (line 133) | def test_uniquify_str(self):
    method test_map_ids_to_strs (line 145) | def test_map_ids_to_strs(self):

FILE: texar_repo/texar/utils/variables.py
  function get_unique_named_variable_scope (line 33) | def get_unique_named_variable_scope(base_name):
  function add_variable (line 53) | def add_variable(variable, var_list):
  function collect_trainable_variables (line 67) | def collect_trainable_variables(modules):
Condensed preview — 323 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (3,133K chars).
[
  {
    "path": ".ipynb_checkpoints/Untitled-checkpoint.ipynb",
    "chars": 72,
    "preview": "{\n \"cells\": [],\n \"metadata\": {},\n \"nbformat\": 4,\n \"nbformat_minor\": 2\n}\n"
  },
  {
    "path": "BERT_SUMM.ipynb",
    "chars": 59537,
    "preview": "{\n  \"nbformat\": 4,\n  \"nbformat_minor\": 0,\n  \"metadata\": {\n    \"colab\": {\n      \"name\": \"BERT SUMM.ipynb\",\n      \"version"
  },
  {
    "path": "Inference.py",
    "chars": 2748,
    "preview": "from flask import Flask,request,render_template\nimport requests \nimport json\nfrom collections import OrderedDict\nimport "
  },
  {
    "path": "Readme.md",
    "chars": 1513,
    "preview": "<h3>Abstractive summarization using bert as encoder and transformer decoder</h3>\n\nI have used a text generation library "
  },
  {
    "path": "bnb_4bit_training.ipynb",
    "chars": 851120,
    "preview": "{\n  \"nbformat\": 4,\n  \"nbformat_minor\": 0,\n  \"metadata\": {\n    \"colab\": {\n      \"provenance\": [],\n      \"gpuType\": \"T4\",\n"
  },
  {
    "path": "config.py",
    "chars": 1766,
    "preview": "import texar as tx\ndcoder_config = {\n    'dim': 768,\n    'num_blocks': 6,\n    'multihead_attention': {\n        'num_head"
  },
  {
    "path": "data/eval_story.txt",
    "chars": 2027,
    "preview": "The new question is that we know how many class data includes, but what if number of class is unknow in data. This is ki"
  },
  {
    "path": "data/eval_summ.txt",
    "chars": 2027,
    "preview": "The new question is that we know how many class data includes, but what if number of class is unknow in data. This is ki"
  },
  {
    "path": "data/train_story.txt",
    "chars": 2027,
    "preview": "The new question is that we know how many class data includes, but what if number of class is unknow in data. This is ki"
  },
  {
    "path": "data/train_summ.txt",
    "chars": 2027,
    "preview": "The new question is that we know how many class data includes, but what if number of class is unknow in data. This is ki"
  },
  {
    "path": "main.py",
    "chars": 5487,
    "preview": "import sys\n\nif not 'texar_repo' in sys.path:\n  sys.path += ['texar_repo']\nimport tensorflow as tf\nimport texar as tx\nimp"
  },
  {
    "path": "model.py",
    "chars": 5779,
    "preview": "\nimport sys\nif not 'texar_repo' in sys.path:\n  sys.path += ['texar_repo']\n\nfrom config import *\nfrom preprocess import f"
  },
  {
    "path": "models/logging.txt",
    "chars": 2172,
    "preview": "2019-03-08 20:02:04,048:INFO:Begin running with train_and_evaluate mode\n2019-03-08 20:03:50,512:INFO:Begin running with "
  },
  {
    "path": "preprocess.py",
    "chars": 14719,
    "preview": "\nimport sys\nif not 'texar_repo' in sys.path:\n  sys.path += ['texar_repo']\nfrom config import *\nfrom texar_repo.examples."
  },
  {
    "path": "texar_repo/.gitignore",
    "chars": 3873,
    "preview": "# Created by https://www.gitignore.io/api/python\n\n### Python ###\n# Byte-compiled / optimized / DLL files\n__pycache__/\n*."
  },
  {
    "path": "texar_repo/.pylintrc",
    "chars": 13741,
    "preview": "[MASTER]\n\n# Specify a configuration file.\n#rcfile=\n\n# Python code to execute, usually for sys.path manipulation such as\n"
  },
  {
    "path": "texar_repo/.travis.yml",
    "chars": 554,
    "preview": "sudo: required\nlanguage: python\npython:\n  - \"2.7\"\n  - \"3.5\"\n  - \"3.6\"\n\ninstall:\n  - pip install -e .[tensorflow-cpu]\n  -"
  },
  {
    "path": "texar_repo/CHANGELOG.md",
    "chars": 297,
    "preview": "\n## [Unreleased]\n\n### New features\n\n* [2019-01-02] Support distributed-GPU training. See the [example](https://github.co"
  },
  {
    "path": "texar_repo/LICENSE",
    "chars": 11357,
    "preview": "                                 Apache License\n                           Version 2.0, January 2004\n                   "
  },
  {
    "path": "texar_repo/README.md",
    "chars": 6360,
    "preview": "<div align=\"center\">\n   <img src=\"https://zhitinghu.github.io/texar_web/images/logo_h_035.png\"><br><br>\n</div>\n \n-------"
  },
  {
    "path": "texar_repo/bin/average_checkpoints.py",
    "chars": 3383,
    "preview": "\"\"\"Checkpoint averaging script.\"\"\"\n\n# This script is modified version of\n# https://github.com/tensorflow/tensor2tensor/b"
  },
  {
    "path": "texar_repo/bin/train.py",
    "chars": 8430,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/bin/utils/README.md",
    "chars": 2119,
    "preview": "\nThis directory contains several utilities for, e.g., data pre-processing. \n\nInstructions of using BPE and WPM encoding "
  },
  {
    "path": "texar_repo/bin/utils/apply_bpe",
    "chars": 10755,
    "preview": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Author: Rico Sennrich\n# flake8: noqa\n\n\"\"\"Use operations learned with lea"
  },
  {
    "path": "texar_repo/bin/utils/learn_bpe",
    "chars": 9846,
    "preview": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Author: Rico Sennrich\n# flake8: noqa\n\n\"\"\"Use byte pair encoding (BPE) to"
  },
  {
    "path": "texar_repo/bin/utils/make_vocab.py",
    "chars": 3190,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/bin/utils/multi-bleu.perl",
    "chars": 4826,
    "preview": "#!/usr/bin/env perl\n#\n# This file is part of moses.  Its use is licensed under the GNU Lesser General\n# Public License v"
  },
  {
    "path": "texar_repo/bin/utils/spm_decode",
    "chars": 1024,
    "preview": "#!/usr/bin/env python\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import pri"
  },
  {
    "path": "texar_repo/bin/utils/spm_encode",
    "chars": 1080,
    "preview": "#!/usr/bin/env python\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import pri"
  },
  {
    "path": "texar_repo/bin/utils/spm_train",
    "chars": 626,
    "preview": "#!/usr/bin/env python\n\nfrom argparse import ArgumentParser\nimport sentencepiece as spm\n\nparser = ArgumentParser(descript"
  },
  {
    "path": "texar_repo/config.py",
    "chars": 1491,
    "preview": "import texar as tx\ndcoder_config = {\n    'dim': 768,\n    'num_blocks': 6,\n    'multihead_attention': {\n        'num_head"
  },
  {
    "path": "texar_repo/docs/Makefile",
    "chars": 8060,
    "preview": "# Makefile for Sphinx documentation\n#\n\n# You can set these variables from the command line.\nSPHINXOPTS    =\nSPHINXBUILD "
  },
  {
    "path": "texar_repo/docs/_static/css/custom_theme.css",
    "chars": 1358,
    "preview": "/* This style sheet is heavily inspired by PyTorch docs . */\n/* https://github.com/pytorch/pytorch/blob/master/docs/sour"
  },
  {
    "path": "texar_repo/docs/code/agents.rst",
    "chars": 1223,
    "preview": ".. role:: hidden\n    :class: hidden-section\n\nAgents\n*******\n\n\nSequence Agents\n=================\n\n:hidden:`SeqPGAgent`\n~~"
  },
  {
    "path": "texar_repo/docs/code/context.rst",
    "chars": 581,
    "preview": ".. role:: hidden\n    :class: hidden-section\n\nContext\n********\n\nGlobal Mode\n===========\n\n:hidden:`global_mode`\n~~~~~~~~~~"
  },
  {
    "path": "texar_repo/docs/code/core.rst",
    "chars": 3543,
    "preview": ".. role:: hidden\n    :class: hidden-section\n\nCore\n****\n\n\nCells\n=====\n\n:hidden:`default_rnn_cell_hparams`\n~~~~~~~~~~~~~~~"
  },
  {
    "path": "texar_repo/docs/code/data.rst",
    "chars": 3168,
    "preview": ".. role:: hidden\n    :class: hidden-section\n\nData\n*******\n\nVocabulary\n==========\n\n:hidden:`SpecialTokens`\n~~~~~~~~~~~~~~"
  },
  {
    "path": "texar_repo/docs/code/evals.rst",
    "chars": 805,
    "preview": ".. role:: hidden\n    :class: hidden-section\n\nEvaluations\n***********\n\n\nBLEU\n==========\n\n:hidden:`sentence_bleu`\n~~~~~~~~"
  },
  {
    "path": "texar_repo/docs/code/hyperparams.rst",
    "chars": 97,
    "preview": ".. role:: hidden\n    :class: hidden\n\nHParams\n*******\n\n.. autoclass:: texar.HParams\n    :members:\n"
  },
  {
    "path": "texar_repo/docs/code/losses.rst",
    "chars": 2262,
    "preview": ".. role:: hidden\n    :class: hidden-section\n\nLoss Functions\n**************\n\nMLE Loss\n==========\n\n:hidden:`sequence_softm"
  },
  {
    "path": "texar_repo/docs/code/models.rst",
    "chars": 356,
    "preview": ".. role:: hidden\n    :class: hidden-section\n\nModels\n********\n\nModelBase\n=============\n\n.. autoclass:: texar.models.Model"
  },
  {
    "path": "texar_repo/docs/code/modules.rst",
    "chars": 6510,
    "preview": ".. role:: hidden\n    :class: hidden-section\n\nModules\n*******\n\nModuleBase\n===========\n\n.. autoclass:: texar.ModuleBase\n  "
  },
  {
    "path": "texar_repo/docs/code/run.rst",
    "chars": 112,
    "preview": ".. role:: hidden\n    :class: hidden-section\n\nExecutor\n********\n\n.. autoclass:: texar.run.Executor\n    :members:\n"
  },
  {
    "path": "texar_repo/docs/code/txtgen.rst",
    "chars": 37,
    "preview": "Texar\n******\n\n.. automodule:: texar \n"
  },
  {
    "path": "texar_repo/docs/code/utils.rst",
    "chars": 6546,
    "preview": ".. role:: hidden\n    :class: hidden-section\n\nUtils\n**************\n\nFrequent Use\n============\n\n:hidden:`AverageRecorder`\n"
  },
  {
    "path": "texar_repo/docs/conf.py",
    "chars": 11100,
    "preview": "# -*- coding: utf-8 -*-\n#\n# texar documentation build configuration file, created by\n# sphinx-quickstart on Mon Sep  4 2"
  },
  {
    "path": "texar_repo/docs/examples.md",
    "chars": 4544,
    "preview": "# Examples #\n\nRich examples are included to demonstrate the use of Texar. The implementations of cutting-edge models/alg"
  },
  {
    "path": "texar_repo/docs/get_started.md",
    "chars": 6075,
    "preview": "# Overview #\n\n**Texar** is an open-source toolkit based on Tensorflow, aiming to support a broad set of machine learning"
  },
  {
    "path": "texar_repo/docs/index.rst",
    "chars": 726,
    "preview": ".. texar documentation master file, created by\n   sphinx-quickstart on Mon Sep  4 21:15:05 2017.\n   You can adapt this f"
  },
  {
    "path": "texar_repo/docs/make.bat",
    "chars": 7449,
    "preview": "@ECHO OFF\n\nREM Command file for Sphinx documentation\n\nif \"%SPHINXBUILD%\" == \"\" (\n\tset SPHINXBUILD=sphinx-build\n)\nset BUI"
  },
  {
    "path": "texar_repo/docs/requirements.txt",
    "chars": 119,
    "preview": "sphinx\nsphinx-rtd-theme >= 0.2.4\nsphinxcontrib-napoleon >= 0.6.1\nPygments >= 2.1.1\ntensorflow >= 1.7.0\npyyaml\nfuncsigs\n"
  },
  {
    "path": "texar_repo/docs/tutorials/tutorial.rst",
    "chars": 64,
    "preview": "Getting Started\n===============\n\nWrite an awesome tutorial here."
  },
  {
    "path": "texar_repo/examples/README.md",
    "chars": 4129,
    "preview": "# Examples #\n\nRich examples are included to demonstrate the use of Texar. The implementations of cutting-edge models/alg"
  },
  {
    "path": "texar_repo/examples/bert/README.md",
    "chars": 5138,
    "preview": "# BERT: Pre-trained models and downstream applications\n\nThis is a Texar implementation of Google's BERT model, which all"
  },
  {
    "path": "texar_repo/examples/bert/bert_classifier_main.py",
    "chars": 11151,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/bert/bert_config_lib/README.md",
    "chars": 293,
    "preview": "### Configuration files of BERT models in Texar style.\n\nFor example, `config_model_uncased_L-12_H-768_A-12.py` is the Te"
  },
  {
    "path": "texar_repo/examples/bert/bert_config_lib/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "texar_repo/examples/bert/bert_config_lib/config_model_uncased_L-12_H-768_A-12.py",
    "chars": 1232,
    "preview": "embed = {\n    'dim': 768,\n    'name': 'word_embeddings'\n}\nvocab_size = 30522\n\nsegment_embed = {\n    'dim': 768,\n    'nam"
  },
  {
    "path": "texar_repo/examples/bert/config_classifier.py",
    "chars": 553,
    "preview": "hidden_dim = 768\n\nopt = {\n    'optimizer': {\n        'type': 'AdamWeightDecayOptimizer',\n        'kwargs': {\n           "
  },
  {
    "path": "texar_repo/examples/bert/config_data_mrpc.py",
    "chars": 150,
    "preview": "data_dir = 'data/MRPC'\ntrain_batch_size = 32\nmax_seq_length = 128\neval_batch_size = 8\ntest_batch_size = 8\nmax_train_epoc"
  },
  {
    "path": "texar_repo/examples/bert/config_data_sst.py",
    "chars": 151,
    "preview": "data_dir = 'data/SST-2'\ntrain_batch_size = 32\nmax_seq_length = 128\neval_batch_size = 8\ntest_batch_size = 8\nmax_train_epo"
  },
  {
    "path": "texar_repo/examples/bert/utils/data_utils.py",
    "chars": 22082,
    "preview": "\"\"\"\nThis is the Data Loading Pipeline for Sentence Classifier Task from\nhttps://github.com/google-research/bert/blob/mas"
  },
  {
    "path": "texar_repo/examples/bert/utils/model_utils.py",
    "chars": 6895,
    "preview": "\"\"\"\nModel utility functions\n\"\"\"\nimport json\nimport collections\nimport re\nimport random\nimport tensorflow as tf\nimport nu"
  },
  {
    "path": "texar_repo/examples/bert/utils/tokenization.py",
    "chars": 10572,
    "preview": "# coding=utf-8\n# Copied from google BERT repo.\n\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under"
  },
  {
    "path": "texar_repo/examples/distributed_gpu/README.md",
    "chars": 5697,
    "preview": "# Model Training with Multi/Distributed GPUs\n\nThis example shows how models built with Texar can be trained with multipl"
  },
  {
    "path": "texar_repo/examples/distributed_gpu/config_large.py",
    "chars": 1497,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/distributed_gpu/config_medium.py",
    "chars": 1488,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/distributed_gpu/config_small.py",
    "chars": 1486,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/distributed_gpu/lm_ptb_distributed.py",
    "chars": 8368,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/distributed_gpu/ptb_reader.py",
    "chars": 3459,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/hierarchical_dialog/README.md",
    "chars": 3792,
    "preview": "# Hierarchical Recurrent Encoder-Decoder (HRED) Dialogue Model\n\nThis example builds a HRED dialogue model described in ["
  },
  {
    "path": "texar_repo/examples/hierarchical_dialog/config_data.py",
    "chars": 1949,
    "preview": "import os\n\ndata_root = './data'\nmax_utterance_cnt = 9\n\ndata_hparams = {\n    stage: {\n        \"num_epochs\": 1,\n        \"s"
  },
  {
    "path": "texar_repo/examples/hierarchical_dialog/config_model_biminor.py",
    "chars": 1564,
    "preview": "\nimport tensorflow as tf\n\nnum_samples = 10 # Number of samples generated for each test data instance\nbeam_width = num_sa"
  },
  {
    "path": "texar_repo/examples/hierarchical_dialog/config_model_uniminor.py",
    "chars": 1350,
    "preview": "\nimport tensorflow as tf\n\nnum_samples = 10 # Number of samples generated for each test data instance\nbeam_width = num_sa"
  },
  {
    "path": "texar_repo/examples/hierarchical_dialog/hred.py",
    "chars": 10313,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/hierarchical_dialog/sw_loader.py",
    "chars": 7790,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/language_model_ptb/README.md",
    "chars": 1613,
    "preview": "# Language Model on PTB #\n\nThis example builds an LSTM language model, and trains on PTB data. Model and training are de"
  },
  {
    "path": "texar_repo/examples/language_model_ptb/config_large.py",
    "chars": 1497,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/language_model_ptb/config_medium.py",
    "chars": 1488,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/language_model_ptb/config_small.py",
    "chars": 1486,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/language_model_ptb/lm_ptb.py",
    "chars": 6518,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/language_model_ptb/ptb_reader.py",
    "chars": 2970,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/memory_network_lm/README.md",
    "chars": 2118,
    "preview": "# End-to-End Memory Network for Language Modeling #\n\nThis example builds a Memory Network language model, and trains on "
  },
  {
    "path": "texar_repo/examples/memory_network_lm/config.py",
    "chars": 1354,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/memory_network_lm/lm_ptb_memnet.py",
    "chars": 7320,
    "preview": "#!/usr/bin/env python3\n# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, V"
  },
  {
    "path": "texar_repo/examples/memory_network_lm/ptb_reader.py",
    "chars": 3542,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/rl_gym/README.md",
    "chars": 622,
    "preview": "# Reinforcement Learning for Games #\n\nThis example implements three RL algorithms for the Cartpole game based on the Ope"
  },
  {
    "path": "texar_repo/examples/rl_gym/ac_cartpole.py",
    "chars": 2090,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/rl_gym/config.py",
    "chars": 805,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/rl_gym/dqn_cartpole.py",
    "chars": 2105,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/rl_gym/pg_cartpole.py",
    "chars": 2237,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/sentence_classifier/README.md",
    "chars": 1311,
    "preview": "# Sentence Sentiment Classifier #\n\nThis example builds sentence convolutional classifier, and trains on [SST data](https"
  },
  {
    "path": "texar_repo/examples/sentence_classifier/clas_main.py",
    "chars": 4033,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/sentence_classifier/config_kim.py",
    "chars": 2322,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/sentence_classifier/sst_data_preprocessor.py",
    "chars": 3365,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/seq2seq_attn/README.md",
    "chars": 1510,
    "preview": "# Seq2seq Model #\n\nThis example builds an attentional seq2seq model for machine translation.\n\n## Usage ##\n\n### Dataset #"
  },
  {
    "path": "texar_repo/examples/seq2seq_attn/config_iwslt14.py",
    "chars": 1045,
    "preview": "\nnum_epochs = 15\ndisplay = 500\n\nsource_vocab_file = './data/iwslt14/vocab.de'\ntarget_vocab_file = './data/iwslt14/vocab."
  },
  {
    "path": "texar_repo/examples/seq2seq_attn/config_model.py",
    "chars": 648,
    "preview": "# Attentional Seq2seq model.\n# Hyperparameters not specified here will take the default values.\n\nnum_units = 256\nbeam_wi"
  },
  {
    "path": "texar_repo/examples/seq2seq_attn/config_model_full.py",
    "chars": 3983,
    "preview": "# The full possible hyperparameters for the attentional seq2seq model.\n# Most of the hyperparameters take the default va"
  },
  {
    "path": "texar_repo/examples/seq2seq_attn/config_toy_copy.py",
    "chars": 995,
    "preview": "\nnum_epochs = 4\ndisplay = 50\n\nsource_vocab_file = './data/toy_copy/train/vocab.sources.txt'\ntarget_vocab_file = './data/"
  },
  {
    "path": "texar_repo/examples/seq2seq_attn/prepare_data.py",
    "chars": 1594,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/seq2seq_attn/seq2seq_attn.py",
    "chars": 5604,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/seq2seq_configs/README.md",
    "chars": 1028,
    "preview": "# Seq2seq Model #\n\nThis example builds a (plain) seq2seq model with Texar's model template and Tensorflow estimator. \n\n#"
  },
  {
    "path": "texar_repo/examples/seq2seq_configs/config_data_toy_copy.yml",
    "chars": 853,
    "preview": "# NMT data config. See `texar.data.PairedTextData.default_hparams()` for \n# hyperparameters of train/eval data. Hyperpar"
  },
  {
    "path": "texar_repo/examples/seq2seq_configs/config_model_medium.yml",
    "chars": 696,
    "preview": "# Basic Seq2seq model of medium size. See \n# `texar.models.BasicSeq2seq.default_hparams()` for possible hyperparameters "
  },
  {
    "path": "texar_repo/examples/seq2seq_configs/config_model_small.yml",
    "chars": 651,
    "preview": "# Basic Seq2seq model of small size. See \n# `texar.models.BasicSeq2seq.default_hparams()` for possible hyperparameters \n"
  },
  {
    "path": "texar_repo/examples/seq2seq_exposure_bias/README.md",
    "chars": 5339,
    "preview": "# Sequence Generation Algorithms Tackling Exposure Bias #\n\nDespite the computational simplicity and efficiency, maximum "
  },
  {
    "path": "texar_repo/examples/seq2seq_exposure_bias/baseline_seq2seq_attn_main.py",
    "chars": 8686,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/seq2seq_exposure_bias/configs/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "texar_repo/examples/seq2seq_exposure_bias/configs/config_giga.py",
    "chars": 1139,
    "preview": "num_epochs = 30\nobserve_steps = 500\n\neval_metric = 'rouge'\n\nbatch_size = 64\nsource_vocab_file = './data/giga/vocab.artic"
  },
  {
    "path": "texar_repo/examples/seq2seq_exposure_bias/configs/config_iwslt14.py",
    "chars": 1266,
    "preview": "num_epochs = 50 # the best epoch occurs within 10 epochs in most cases\nobserve_steps = 500\n\neval_metric = 'bleu'\n\nbatch_"
  },
  {
    "path": "texar_repo/examples/seq2seq_exposure_bias/configs/config_model.py",
    "chars": 771,
    "preview": "num_units = 256\nbeam_width = 5\ndecoder_layers = 1\ndropout = 0.2\n\nembedder = {\n    'dim': num_units\n}\nencoder = {\n    'rn"
  },
  {
    "path": "texar_repo/examples/seq2seq_exposure_bias/interpolation_decoder.py",
    "chars": 5276,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/seq2seq_exposure_bias/interpolation_helper.py",
    "chars": 8468,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/seq2seq_exposure_bias/interpolation_main.py",
    "chars": 11893,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/seq2seq_exposure_bias/raml_main.py",
    "chars": 13102,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/seq2seq_exposure_bias/requirements.txt",
    "chars": 12,
    "preview": "rouge==0.2.1"
  },
  {
    "path": "texar_repo/examples/seq2seq_exposure_bias/scheduled_sampling_main.py",
    "chars": 10165,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/seq2seq_exposure_bias/utils/prepare_data.py",
    "chars": 1588,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/seq2seq_exposure_bias/utils/raml_samples_generation/README.md",
    "chars": 459,
    "preview": "## Augmented Data Generation for RAML Algorithm\n\nCodes here are mainly copied from [pcyin's github](https://github.com/p"
  },
  {
    "path": "texar_repo/examples/seq2seq_exposure_bias/utils/raml_samples_generation/gen_samples_giga.sh",
    "chars": 471,
    "preview": "#!/bin/sh\n\ntrain_src=\"../../data/giga/train.article\"\ntrain_tgt=\"../../data/giga/train.title\"\n\npython vocab.py \\\n\t--src_v"
  },
  {
    "path": "texar_repo/examples/seq2seq_exposure_bias/utils/raml_samples_generation/gen_samples_iwslt14.sh",
    "chars": 477,
    "preview": "#!/bin/sh\n\ntrain_src=\"../../data/iwslt14/train.de\"\ntrain_tgt=\"../../data/iwslt14/train.en\"\n\npython vocab.py \\\n\t--src_voc"
  },
  {
    "path": "texar_repo/examples/seq2seq_exposure_bias/utils/raml_samples_generation/process_samples.py",
    "chars": 11356,
    "preview": "from __future__ import print_function\nfrom nltk.translate.bleu_score import sentence_bleu\nfrom nltk.translate.bleu_score"
  },
  {
    "path": "texar_repo/examples/seq2seq_exposure_bias/utils/raml_samples_generation/util.py",
    "chars": 1691,
    "preview": "from collections import defaultdict\nimport numpy as np\n\ndef read_corpus(file_path, source):\n    data = []\n    for line i"
  },
  {
    "path": "texar_repo/examples/seq2seq_exposure_bias/utils/raml_samples_generation/vocab.py",
    "chars": 3835,
    "preview": "from __future__ import print_function\nimport argparse\nfrom collections import Counter\nfrom itertools import chain\n\nimpor"
  },
  {
    "path": "texar_repo/examples/seq2seq_rl/README.md",
    "chars": 2088,
    "preview": "# Seq2seq Model with Policy Gradient Training #\n\nThis example builds an attentional seq2seq model that is trained with p"
  },
  {
    "path": "texar_repo/examples/seq2seq_rl/config_iwslt14.py",
    "chars": 1071,
    "preview": "\ndisplay = 100\ndisplay_eval = 5500\n\nsource_vocab_file = './data/iwslt14/vocab.de'\ntarget_vocab_file = './data/iwslt14/vo"
  },
  {
    "path": "texar_repo/examples/seq2seq_rl/config_model.py",
    "chars": 575,
    "preview": "# Attentional Seq2seq model.\n# Hyperparameters not specified here will take the default values.\n\nnum_units = 256\nbeam_wi"
  },
  {
    "path": "texar_repo/examples/seq2seq_rl/config_toy_copy.py",
    "chars": 1141,
    "preview": "\ndisplay = 10\ndisplay_eval = 300\n\nsource_vocab_file = './data/toy_copy/train/vocab.sources.txt'\ntarget_vocab_file = './d"
  },
  {
    "path": "texar_repo/examples/seq2seq_rl/prepare_data.py",
    "chars": 1594,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/seq2seq_rl/seq2seq_attn_pg.py",
    "chars": 7419,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/seqgan/README.md",
    "chars": 5447,
    "preview": "# SeqGAN for Text Generation\n\nThis example is an implementation of [(Yu et al.) SeqGAN: Sequence Generative Adversarial "
  },
  {
    "path": "texar_repo/examples/seqgan/config_coco.py",
    "chars": 2018,
    "preview": "generator_pretrain_epoch = 80\ndiscriminator_pretrain_epoch = 80\nadversial_epoch = 100\n\nhidden_size = 32\nbatch_size = 64\n"
  },
  {
    "path": "texar_repo/examples/seqgan/config_ptb_large.py",
    "chars": 2005,
    "preview": "generator_pretrain_epoch = 55\ndiscriminator_pretrain_epoch = 15\nadversial_epoch = 20\n\nhidden_size = 1500\nbatch_size = 64"
  },
  {
    "path": "texar_repo/examples/seqgan/config_ptb_medium.py",
    "chars": 2003,
    "preview": "generator_pretrain_epoch = 39\ndiscriminator_pretrain_epoch = 15\nadversial_epoch = 20\n\nhidden_size = 650\nbatch_size = 64\n"
  },
  {
    "path": "texar_repo/examples/seqgan/config_ptb_small.py",
    "chars": 2002,
    "preview": "generator_pretrain_epoch = 13\ndiscriminator_pretrain_epoch = 15\nadversial_epoch = 10\n\nhidden_size = 200\nbatch_size = 64\n"
  },
  {
    "path": "texar_repo/examples/seqgan/data_utils.py",
    "chars": 1970,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/seqgan/seqgan_train.py",
    "chars": 14349,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/sequence_tagging/README.md",
    "chars": 1115,
    "preview": "# Sequence tagging on CoNLL-2003 #\n\nThis example builds a bi-directional LSTM-CNN model for NER task and trains on CoNLL"
  },
  {
    "path": "texar_repo/examples/sequence_tagging/config.py",
    "chars": 1766,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/sequence_tagging/conll_reader.py",
    "chars": 8977,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/sequence_tagging/conll_writer.py",
    "chars": 779,
    "preview": "__author__ = 'max'\n\n\nclass CoNLLWriter(object):\n    def __init__(self, i2w, i2n):\n        self.__source_file = None\n    "
  },
  {
    "path": "texar_repo/examples/sequence_tagging/conlleval",
    "chars": 12735,
    "preview": "#!/usr/bin/perl -w\n# conlleval: evaluate result of processing CoNLL-2000 shared task\n# usage:     conlleval [-l] [-r] [-"
  },
  {
    "path": "texar_repo/examples/sequence_tagging/ner.py",
    "chars": 8240,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/sequence_tagging/scores.py",
    "chars": 423,
    "preview": "import subprocess\nimport sys \n\ndef scores(path):\n  bashCommand = 'perl conlleval'\n  process = subprocess.Popen(bashComma"
  },
  {
    "path": "texar_repo/examples/text_style_transfer/README.md",
    "chars": 5540,
    "preview": "# Text Style Transfer #\n\nThis example implements a simplified variant of the `ctrl-gen` model from \n\n[Toward Controlled "
  },
  {
    "path": "texar_repo/examples/text_style_transfer/config.py",
    "chars": 2839,
    "preview": "\"\"\"Config\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_functi"
  },
  {
    "path": "texar_repo/examples/text_style_transfer/ctrl_gen_model.py",
    "chars": 7265,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/text_style_transfer/main.py",
    "chars": 6683,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/text_style_transfer/prepare_data.py",
    "chars": 1042,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/torchtext/.gitignore",
    "chars": 21,
    "preview": ".data/\n.vector_cache/"
  },
  {
    "path": "texar_repo/examples/torchtext/README.md",
    "chars": 291,
    "preview": "# Data loading with torchtext #\n\nThis example demonstrates the use of [torchtext](https://github.com/pytorch/text) packa"
  },
  {
    "path": "texar_repo/examples/torchtext/batchfirst_bptt.py",
    "chars": 3216,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/torchtext/config_small.py",
    "chars": 1486,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/torchtext/lm_torchtext.py",
    "chars": 5688,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/torchtext/requirements.txt",
    "chars": 68,
    "preview": "# also make sure install PyTorch 0.4.0 or newer. \ntorchtext >= 0.2.3"
  },
  {
    "path": "texar_repo/examples/transformer/README.md",
    "chars": 8152,
    "preview": "# Transformer for Machine Translation #\n\nThis is an implementation of the Transformer model described in [Vaswani, Ashis"
  },
  {
    "path": "texar_repo/examples/transformer/bleu_tool.py",
    "chars": 7927,
    "preview": "# Copyright 2018 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you ma"
  },
  {
    "path": "texar_repo/examples/transformer/config_iwslt15.py",
    "chars": 247,
    "preview": "batch_size = 2048\ntest_batch_size = 64\n\nmax_train_epoch = 20\ndisplay_steps = 500\neval_steps = 2000\n\nmax_decoding_length "
  },
  {
    "path": "texar_repo/examples/transformer/config_model.py",
    "chars": 1392,
    "preview": "\"\"\"Configurations of Transformer model\n\"\"\"\nimport copy\nimport texar as tx\n\nrandom_seed = 1234\nbeam_width = 5\nalpha = 0.6"
  },
  {
    "path": "texar_repo/examples/transformer/config_wmt14.py",
    "chars": 246,
    "preview": "batch_size = 3072\ntest_batch_size = 64\n\nmax_train_epoch = 10\ndisplay_steps = 500\neval_steps = 2000\n\nmax_decoding_length="
  },
  {
    "path": "texar_repo/examples/transformer/preprocess_data.sh",
    "chars": 4576,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/transformer/requirements.txt",
    "chars": 30,
    "preview": "torchtext\ntorch\nsentencepiece\n"
  },
  {
    "path": "texar_repo/examples/transformer/scripts/iwslt15_en_vi.sh",
    "chars": 1089,
    "preview": "#!/bin/sh\n# Copied from https://github.com/tensorflow/nmt/blob/master/nmt/scripts/download_iwslt15.sh\n#\n# Download small"
  },
  {
    "path": "texar_repo/examples/transformer/scripts/wmt14_en_de.sh",
    "chars": 5933,
    "preview": "#!/usr/bin/env bash\n\n# This code was adapted from Tensorflow NMT toolkit on 03/24/2018.\n# URL: https://raw.githubusercon"
  },
  {
    "path": "texar_repo/examples/transformer/transformer_main.py",
    "chars": 11196,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/transformer/utils/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "texar_repo/examples/transformer/utils/data_utils.py",
    "chars": 3982,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/transformer/utils/preprocess.py",
    "chars": 9765,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/transformer/utils/utils.py",
    "chars": 2340,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/vae_text/README.md",
    "chars": 2891,
    "preview": "# Variational Autoencoder (VAE) for Text Generation\n\nThis example builds a VAE for text generation, with an LSTM as enco"
  },
  {
    "path": "texar_repo/examples/vae_text/config_lstm_ptb.py",
    "chars": 3036,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/vae_text/config_lstm_yahoo.py",
    "chars": 3117,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/vae_text/config_trans_ptb.py",
    "chars": 4347,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/vae_text/config_trans_yahoo.py",
    "chars": 4210,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/vae_text/prepare_data.py",
    "chars": 2331,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/examples/vae_text/vae_train.py",
    "chars": 13147,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/requirements.txt",
    "chars": 130,
    "preview": "tensorflow >= 1.7.0\ntensorflow-gpu >= 1.7.0\ntensorflow-probability >= 0.3.0\ntensorflow-probability-gpu >= 0.3.0\nfuncsigs"
  },
  {
    "path": "texar_repo/setup.py",
    "chars": 1493,
    "preview": "import setuptools\n\n\nlong_description = '''\nTexar is an open-source toolkit based on Tensorflow,\naiming to support a broa"
  },
  {
    "path": "texar_repo/texar/__init__.py",
    "chars": 1093,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/texar/agents/__init__.py",
    "chars": 1045,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/texar/agents/ac_agent.py",
    "chars": 8544,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/texar/agents/agent_base.py",
    "chars": 1981,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/texar/agents/agent_gym_utils.py",
    "chars": 1899,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/texar/agents/agent_utils.py",
    "chars": 5578,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/texar/agents/agent_utils_test.py",
    "chars": 1102,
    "preview": "#\n\"\"\"\nUnit tests for agent utilities.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom _"
  },
  {
    "path": "texar_repo/texar/agents/dqn_agent.py",
    "chars": 16951,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/texar/agents/episodic_agent_base.py",
    "chars": 4510,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/texar/agents/pg_agent.py",
    "chars": 10100,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/texar/agents/seq_agent_base.py",
    "chars": 1298,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/texar/agents/seq_pg_agent.py",
    "chars": 15335,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/texar/agents/seq_pg_agent_test.py",
    "chars": 2708,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/texar/context.py",
    "chars": 3064,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/texar/context_test.py",
    "chars": 2471,
    "preview": "# -*- coding: utf-8 -*-\n#\n\"\"\"\nUnit tests for various context functionalities.\n\"\"\"\n\nfrom __future__ import absolute_impor"
  },
  {
    "path": "texar_repo/texar/core/__init__.py",
    "chars": 930,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/texar/core/explorations.py",
    "chars": 3669,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/texar/core/layers.py",
    "chars": 43303,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/texar/core/layers_test.py",
    "chars": 11665,
    "preview": "#\n\"\"\"\nUnit tests for various layers.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __"
  },
  {
    "path": "texar_repo/texar/core/optimization.py",
    "chars": 23153,
    "preview": "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
  },
  {
    "path": "texar_repo/texar/core/optimization_test.py",
    "chars": 5684,
    "preview": "#\n\"\"\"\nUnit tests for various optimization related utilities.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__"
  }
]

// ... and 123 more files (download for full content)

About this extraction

This page contains the full source code of the santhoshkolloju/Abstractive-Summarization-With-Transfer-Learning GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 323 files (2.8 MB), approximately 746.2k tokens, and a symbol index with 1333 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.

Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.

Copied to clipboard!