[
  {
    "path": ".gitignore",
    "content": "tmp\ndata\nlogs\ncheckpoints\n*.pem\n*.sh\n*autoenv*\n\n# PYTHON\n# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# C extensions\n*.so\n\n# Distribution / packaging\n.Python\nbuild/\ndevelop-eggs/\ndist/\ndownloads/\neggs/\n.eggs/\nlib/\nlib64/\nparts/\nsdist/\nvar/\nwheels/\n*.egg-info/\n.installed.cfg\n*.egg\n\n# PyInstaller\n#  Usually these files are written by a python script from a template\n#  before PyInstaller builds the exe, so as to inject date/other infos into it.\n*.manifest\n*.spec\n\n# Installer logs\npip-log.txt\npip-delete-this-directory.txt\n\n# Unit test / coverage reports\nhtmlcov/\n.tox/\n.coverage\n.coverage.*\n.cache\nnosetests.xml\ncoverage.xml\n*.cover\n.hypothesis/\n\n# Translations\n*.mo\n*.pot\n\n# Django stuff:\n*.log\nlocal_settings.py\n\n# Flask stuff:\ninstance/\n.webassets-cache\n\n# Scrapy stuff:\n.scrapy\n\n# Sphinx documentation\ndocs/_build/\n\n# PyBuilder\ntarget/\n\n# Jupyter Notebook\n.ipynb_checkpoints\n\n# pyenv\n.python-version\n\n# celery beat schedule file\ncelerybeat-schedule\n\n# SageMath parsed files\n*.sage.py\n\n# Environments\n.env\n.venv\nenv/\nvenv/\nENV/\n\n# Spyder project settings\n.spyderproject\n.spyproject\n\n# Rope project settings\n.ropeproject\n\n# mkdocs documentation\n/site\n\n# mypy\n.mypy_cache/\n\n# NODE\n# Logs\nlogs\n*.log\nnpm-debug.log*\nyarn-debug.log*\nyarn-error.log*\n\n# Runtime data\npids\n*.pid\n*.seed\n*.pid.lock\n\n# Directory for instrumented libs generated by jscoverage/JSCover\nlib-cov\n\n# Coverage directory used by tools like istanbul\ncoverage\n\n# nyc test coverage\n.nyc_output\n\n# Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files)\n.grunt\n\n# Bower dependency directory (https://bower.io/)\nbower_components\n\n# node-waf configuration\n.lock-wscript\n\n# Compiled binary addons (http://nodejs.org/api/addons.html)\nbuild/Release\n\n# Dependency directories\nnode_modules/\njspm_packages/\n\n# Typescript v1 declaration files\ntypings/\n\n# Optional npm cache directory\n.npm\n\n# Optional eslint cache\n.eslintcache\n\n# Optional REPL history\n.node_repl_history\n\n# Output of 'npm pack'\n*.tgz\n\n# Yarn Integrity file\n.yarn-integrity\n\n# dotenv environment variables file\n.env\n\n"
  },
  {
    "path": ".gitmodules",
    "content": "[submodule \"House3D\"]\n\tpath = House3D\n\turl = git@github.com:abhshkdz/House3D.git\n"
  },
  {
    "path": "CODE_OF_CONDUCT.md",
    "content": "# Code of Conduct\n\nFacebook has adopted a Code of Conduct that we expect project participants to adhere to.\nPlease read the [full text](https://code.facebook.com/pages/876921332402685/open-source-code-of-conduct)\nso that you can understand what actions will and will not be tolerated."
  },
  {
    "path": "CONTRIBUTING.md",
    "content": "# Contributing to EmbodiedQA\nWe want to make contributing to this project as easy and transparent as\npossible.\n\n## Our Development Process\nMinor changes and improvements will be released on an ongoing basis. Larger changes (e.g., changesets implementing a new paper) will be released on a more periodic basis.\n\n## Pull Requests\nWe actively welcome your pull requests.\n\n1. Fork the repo and create your branch from `master`.\n2. If you've added code that should be tested, add tests.\n3. If you've changed APIs, update the documentation.\n4. Ensure the test suite passes.\n5. Make sure your code lints.\n6. If you haven't already, complete the Contributor License Agreement (\"CLA\").\n\n## Contributor License Agreement (\"CLA\")\nIn order to accept your pull request, we need you to submit a CLA. You only need\nto do this once to work on any of Facebook's open source projects.\n\nComplete your CLA here: <https://code.facebook.com/cla>\n\n## Issues\nWe use GitHub issues to track public bugs. Please ensure your description is\nclear and has sufficient instructions to be able to reproduce the issue.\n\nFacebook has a [bounty program](https://www.facebook.com/whitehat/) for the safe\ndisclosure of security bugs. In those cases, please go through the process\noutlined on that page and do not file a public issue.\n\n## Coding Style  \n* 4 spaces for indentation rather than tabs\n* 80 character line length\n\n## License\nBy contributing to EmbodiedQA, you agree that your contributions will be licensed\nunder the LICENSE file in the root directory of this source tree."
  },
  {
    "path": "LICENSE",
    "content": "BSD License\n\nFor EmbodiedQA software\n\nCopyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n   list of conditions and the following disclaimer.\n\n * Redistributions in binary form must reproduce the above copyright notice,\n   this list of conditions and the following disclaimer in the documentation\n   and/or other materials provided with the distribution.\n\n * Neither the name Facebook nor the names of its contributors may be used to\n   endorse or promote products derived from this software without specific\n   prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\nANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "README.md",
    "content": "# EmbodiedQA\n\nCode for the paper\n\n**[Embodied Question Answering][1]**  \nAbhishek Das, Samyak Datta, Georgia Gkioxari, Stefan Lee, Devi Parikh, Dhruv Batra  \n[arxiv.org/abs/1711.11543][2]  \nCVPR 2018 (Oral)\n\nIn Embodied Question Answering (EmbodiedQA), an agent is spawned at a random location in a 3D environment and asked a question (for e.g. \"What color is the car?\"). In order to answer, the agent must first intelligently navigate to explore the environment, gather necessary visual information through first-person vision, and then answer the question (\"orange\").\n\n![](https://i.imgur.com/jeI7bxm.jpg)\n\nThis repository provides\n\n- [Pretrained CNN](#pretrained-cnn) for [House3D][house3d]\n- Code for [generating EQA questions](#question-generation)\n    - EQA v1: location, color, place preposition\n    - EQA v1-extended: existence, logical, object counts, room counts, distance comparison\n- Code to train and evaluate [navigation](#navigation) and [question-answering](#visual-question-answering) models\n    - [independently with supervised learning](#supervised-learning) on shortest paths\n    - jointly using [reinforcement learning](#reinforce)\n\nIf you find this code useful, consider citing our work:\n\n```\n@inproceedings{embodiedqa,\n  title={{E}mbodied {Q}uestion {A}nswering},\n  author={Abhishek Das and Samyak Datta and Georgia Gkioxari and Stefan Lee and Devi Parikh and Dhruv Batra},\n  booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},\n  year={2018}\n}\n```\n\n## Setup\n\n```\nvirtualenv -p python3 .env\nsource .env/bin/activate\npip install -r requirements.txt\n```\n\nDownload the [SUNCG v1 dataset](https://github.com/facebookresearch/House3D/blob/master/INSTRUCTION.md#usage-instructions) and [install House3D](https://github.com/abhshkdz/House3D/tree/master/renderer#rendering-code-of-house3d).\n\nNOTE: This code uses a [fork of House3D](https://github.com/abhshkdz/house3d) with a few changes to support arbitrary map discretization resolutions.\n\n## Question generation\n\nQuestions for EmbodiedQA are generated programmatically, in a manner similar to [CLEVR (Johnson et al., 2017)][clevr].\n\nNOTE: Pre-generated EQA v1 questions are available for download [here][eqav1].\n\n### Generating questions for all templates in EQA v1, v1-extended\n\n```\ncd data/question-gen\n./run_me.sh MM_DD\n```\n\n### List defined question templates\n\n```\nfrom engine import Engine\n\nE = Engine()\nfor i in E.template_defs:\n    print(i, E.template_defs[i])\n```\n\n### Generate questions for a particular template (say `location`)\n\n```\nfrom house_parse import HouseParse\nfrom engine import Engine\n\nHp = HouseParse(dataDir='/path/to/suncg')\nHp.parse('0aa5e04f06a805881285402096eac723')\n\nE = Engine()\nE.cacheHouse(Hp)\nqns = E.executeFn(E.template_defs['location'])\n\nprint(qns[0]['question'], qns[0]['answer'])\n# what room is the clock located in? bedroom\n\n```\n\n## Pretrained CNN\n\nWe trained a shallow encoder-decoder CNN from scratch in the House3D environment,\nfor RGB reconstruction, semantic segmentation and depth estimation.\nOnce trained, we throw away the decoders, and use the encoder as a frozen feature\nextractor for navigation and question answering. 
The CNN is available for download here:\n\n`wget https://www.dropbox.com/s/ju1zw4iipxlj966/03_13_h3d_hybrid_cnn.pt`\n\nThe training code expects the checkpoint to be present in `training/models/`.\n\n## Supervised Learning\n\n### Download and preprocess the dataset\n\nDownload [EQA v1][eqav1] and shortest path navigations:\n\n```\nwget https://www.dropbox.com/s/6zu1b1jzl0qt7t1/eqa_v1.json\nwget https://www.dropbox.com/s/lhajthx7cdlnhns/a-star-500.zip\nunzip a-star-500.zip\n```\n\nIf this is the first time you are using SUNCG, you will have to clone and use the\n[SUNCG toolbox](https://github.com/shurans/SUNCGtoolbox#convert-to-objmtl)\nto generate obj + mtl files for the houses in EQA.\n\nNOTE: Shortest paths have been updated.  Earlier we computed shortest paths using a discrete grid world, but we found that these shortest paths were sometimes inaccurate.  Old shortest paths are [here](https://www.dropbox.com/s/vgp2ygh1bht1jyb/shortest-paths.zip).\n\n```\ncd utils\npython make_houses.py \\\n    -eqa_path /path/to/eqa.json \\\n    -suncg_toolbox_path /path/to/SUNCGtoolbox \\\n    -suncg_data_path /path/to/suncg/data_root\n```\n\nPreprocess the dataset for training:\n\n```\ncd training\npython utils/preprocess_questions_pkl.py \\\n    -input_json /path/to/eqa_v1.json \\\n    -shortest_path_dir /path/to/shortest/paths/a-star-500 \\\n    -output_train_h5 data/train.h5 \\\n    -output_val_h5 data/val.h5 \\\n    -output_test_h5 data/test.h5 \\\n    -output_data_json data/data.json \\\n    -output_vocab data/vocab.json\n```\n\n### Visual question answering\n\nUpdate the pretrained CNN path in `models.py`.\n\n`python train_vqa.py -input_type ques,image -identifier ques-image -log -cache`\n\nThis model computes question-conditioned attention over the last 5 frames from oracle navigation (shortest paths),\nand predicts the answer. Assuming shortest paths are optimal for answering the question -- which holds for most questions in EQA v1 (`location`, `color`, `place preposition`), with the\nexception of a few `location` questions that might need more visual context than walking right up to the object --\nthis can be thought of as an upper bound on expected accuracy, and performance will get worse when navigation\ntrajectories are sampled from trained policies.\n\nA pretrained VQA model is available for download [here](https://www.dropbox.com/s/jd15af00r7m8neh/vqa_11_18_2018_va0.6154.pt). It achieves a top-1 accuracy of 61.54% on val and 58.46% on test (with GT navigation).\n\nNote that keeping the `cache` flag ON caches images as they are rendered in the first training epoch, so that subsequent epochs are very fast. This is memory-intensive, though, and consumes ~100-120 GB of RAM.\n\n### Navigation\n\nDownload potential maps for evaluating navigation and training with REINFORCE:\n\n```\nwget https://www.dropbox.com/s/53edqtr04jts4q0/target-obj-conn-maps-500.zip\n```\n\n#### Planner-controller policy\n\n`python train_nav.py -model_type pacman -identifier pacman -log`\n\n## REINFORCE\n\n```\npython train_eqa.py \\\n    -nav_checkpoint_path /path/to/nav/ques-image-pacman/checkpoint.pt \\\n    -ans_checkpoint_path /path/to/vqa/ques-image/checkpoint.pt \\\n    -identifier ques-image-eqa \\\n    -log\n```\n\n## Changelog\n\n### 09/07\n\n- We added the baseline models from the CVPR paper (Reactive and LSTM).\n- With the LSTM model trained with behavior cloning (no reinforcement learning), we achieved d_T values of 0.74693/3.99891/8.10669 on the test set for d equal to 10/30/50 respectively.\n- We also updated the shortest paths to fix an issue with the shortest path algorithm we initially used.  Code to generate shortest paths is [here](https://github.com/facebookresearch/EmbodiedQA/blob/master/data/shortest-path-gen/generate-paths-a-star.py).\n\n### 06/13\n\nThis code release contains the following changes over the CVPR version:\n\n- Larger dataset of questions + shortest paths\n- Color names as answers to color questions (earlier they were hex strings)\n\n## Acknowledgements\n\n- Parts of this code are adapted from [pytorch-a3c][pytorch-a3c] by Ilya Kostrikov\n- [Lisa Anne Hendricks](https://people.eecs.berkeley.edu/~lisa_anne/) and [Licheng Yu](http://www.cs.unc.edu/~licheng/)\nhelped with running / testing / debugging code prior to release\n\n## License\n\nBSD\n\n[1]: https://embodiedqa.org\n[2]: https://arxiv.org/abs/1711.11543\n[house3d]: https://github.com/facebookresearch/house3d\n[dijkstar]: https://bitbucket.org/wyatt/dijkstar\n[pytorch-a3c]: https://github.com/ikostrikov/pytorch-a3c\n[eqav1]: https://embodiedqa.org/data\n[clevr]: https://github.com/facebookresearch/clevr-dataset-gen\n"
  },
  {
    "path": "requirements.txt",
    "content": "certifi==2018.4.16\nchardet==3.0.4\nfuture==0.16.0\ngym==0.10.5\nh5py==2.8.0\nidna==2.6\nnumpy==1.14.4\nopencv-python==3.4.1.15\nPillow==5.1.0\npyglet==1.3.2\nrequests==2.18.4\nscipy==1.1.0\nsix==1.11.0\ntorch==0.3.1\ntorchvision==0.2.1\ntqdm==4.23.4\nurllib3==1.22\n"
  },
  {
    "path": "training/data.py",
    "content": "import math\nimport time\nimport h5py\nimport logging\nimport argparse\nimport numpy as np\nimport os, sys, json\nfrom tqdm import tqdm\n\nfrom scipy.misc import imread, imresize\n\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch.utils.data.dataloader import default_collate\nfrom torch.autograd import Variable\n\nsys.path.insert(0, '../../House3D/')\nfrom House3D import objrender, Environment, load_config\nfrom House3D.core import local_create_house\n\nsys.path.insert(0, '../utils/')\nfrom house3d import House3DUtils\n\nfrom models import MultitaskCNN\n\nimport pdb\n\ndef load_vocab(path):\n    with open(path, 'r') as f:\n        vocab = json.load(f)\n        vocab['questionIdxToToken'] = invert_dict(vocab['questionTokenToIdx'])\n        vocab['answerIdxToToken'] = invert_dict(vocab['answerTokenToIdx'])\n\n    assert vocab['questionTokenToIdx']['<NULL>'] == 0\n    assert vocab['questionTokenToIdx']['<START>'] == 1\n    assert vocab['questionTokenToIdx']['<END>'] == 2\n    return vocab\n\n\ndef invert_dict(d):\n    return {v: k for k, v in d.items()}\n\n\n\"\"\"\nif the action sequence is [f, f, l, l, f, f, f, r]\n\ninput sequence to planner is [<start>, f, l, f, r]\noutput sequence for planner is [f, l, f, r, <end>]\n\ninput sequences to controller are [f, f, l, l, f, f, f, r]\noutput sequences for controller are [1, 0, 1, 0, 1, 1, 0, 0]\n\"\"\"\ndef flat_to_hierarchical_actions(actions, controller_action_lim):\n    assert len(actions) != 0\n\n    controller_action_ctr = 0\n\n    planner_actions, controller_actions = [1], []\n    prev_action = 1\n\n    pq_idx, cq_idx, ph_idx = [], [], []\n    ph_trck = 0\n\n    for i in range(1, len(actions)):\n\n        if actions[i] != prev_action:\n            planner_actions.append(actions[i])\n            pq_idx.append(i-1)\n\n        if i > 1:\n            ph_idx.append(ph_trck)\n            if actions[i] == prev_action:\n                controller_actions.append(1)\n                controller_action_ctr += 1\n            else:\n                controller_actions.append(0)\n                controller_action_ctr = 0\n                ph_trck += 1\n            cq_idx.append(i-1)\n\n\n        prev_action = actions[i]\n\n        if controller_action_ctr == controller_action_lim-1:\n            prev_action = False\n\n    return planner_actions, controller_actions, pq_idx, cq_idx, ph_idx\n\n\ndef _dataset_to_tensor(dset, mask=None, dtype=np.int64):\n    arr = np.asarray(dset, dtype=dtype)\n    if mask is not None:\n        arr = arr[mask]\n    if dtype == np.float32:\n        tensor = torch.FloatTensor(arr)\n    else:\n        tensor = torch.LongTensor(arr)\n    return tensor\n\n\ndef eqaCollateCnn(batch):\n    transposed = list(zip(*batch))\n    idx_batch = default_collate(transposed[0])\n    question_batch = default_collate(transposed[1])\n    answer_batch = default_collate(transposed[2])\n    images_batch = default_collate(transposed[3])\n    actions_in_batch = default_collate(transposed[4])\n    actions_out_batch = default_collate(transposed[5])\n    action_lengths_batch = default_collate(transposed[6])\n    return [\n        idx_batch, question_batch, answer_batch, images_batch,\n        actions_in_batch, actions_out_batch, action_lengths_batch\n    ]\n\n\ndef eqaCollateSeq2seq(batch):\n    transposed = list(zip(*batch))\n    idx_batch = default_collate(transposed[0])\n    questions_batch = default_collate(transposed[1])\n    answers_batch = default_collate(transposed[2])\n    images_batch = 
default_collate(transposed[3])\n    actions_in_batch = default_collate(transposed[4])\n    actions_out_batch = default_collate(transposed[5])\n    action_lengths_batch = default_collate(transposed[6])\n    mask_batch = default_collate(transposed[7])\n\n    return [\n        idx_batch, questions_batch, answers_batch, images_batch,\n        actions_in_batch, actions_out_batch, action_lengths_batch, mask_batch\n    ]\n\n\nclass EqaDataset(Dataset):\n    def __init__(self,\n                 questions_h5,\n                 vocab,\n                 num_frames=1,\n                 data_json=False,\n                 split='train',\n                 gpu_id=0,\n                 input_type='ques',\n                 max_threads_per_gpu=10,\n                 to_cache=False,\n                 target_obj_conn_map_dir=False,\n                 map_resolution=1000,\n                 overfit=False,\n                 max_controller_actions=5,\n                 max_actions=None):\n\n        self.questions_h5 = questions_h5\n        self.vocab = load_vocab(vocab)\n        self.num_frames = num_frames\n        self.max_controller_actions = max_controller_actions\n\n        np.random.seed()\n\n        self.data_json = data_json\n        self.split = split\n        self.gpu_id = gpu_id\n\n        self.input_type = input_type\n\n        self.max_threads_per_gpu = max_threads_per_gpu\n\n        self.target_obj_conn_map_dir = target_obj_conn_map_dir\n        self.map_resolution = map_resolution\n        self.overfit = overfit\n\n        self.to_cache = to_cache\n        self.img_data_cache = {}\n\n        print('Reading question data into memory')\n        self.idx = _dataset_to_tensor(questions_h5['idx'])\n        self.questions = _dataset_to_tensor(questions_h5['questions'])\n        self.answers = _dataset_to_tensor(questions_h5['answers'])\n        self.actions = _dataset_to_tensor(questions_h5['action_labels'])\n        self.action_lengths = _dataset_to_tensor(\n            questions_h5['action_lengths'])\n\n        if max_actions: #max actions will allow us to create arrays of a certain length.  
Helpful if you only want to train with 10 actions.\n            assert isinstance(max_actions, int)\n            num_data_items = self.actions.shape[0]\n            new_actions = np.zeros((num_data_items, max_actions+2), dtype=np.int64)\n            new_lengths = np.ones((num_data_items,), dtype=np.int64)*max_actions\n            for i in range(num_data_items):\n                action_length = int(self.action_lengths[i])\n                new_actions[i,0] = 1\n                new_actions[i,1:max_actions+1] = self.actions[i, action_length-max_actions: action_length].numpy() \n            self.actions = torch.LongTensor(new_actions)\n            self.action_lengths = torch.LongTensor(new_lengths)\n\n        if self.data_json != False:\n            data = json.load(open(self.data_json, 'r'))\n            self.envs = data['envs']\n\n            self.env_idx = data[self.split + '_env_idx']\n            self.env_list = [self.envs[x] for x in self.env_idx]\n            self.env_set = list(set(self.env_list))\n            self.env_set.sort()\n\n            if self.overfit == True:\n                self.env_idx = self.env_idx[:1]\n                self.env_set = self.env_list = [self.envs[x] for x in self.env_idx]\n                print('Trying to overfit to [house %s]' % self.env_set[0])\n                logging.info('Trying to overfit to [house {}]'.format(self.env_set[0]))\n\n            print('Total envs: %d' % len(list(set(self.envs))))\n            print('Envs in %s: %d' % (self.split,\n                                      len(list(set(self.env_idx)))))\n\n            if input_type != 'ques':\n                ''''\n                If training, randomly sample and load a subset of environments,\n                train on those, and then cycle through to load the rest.\n\n                On the validation and test set, load in order, and cycle through.\n\n                For both, add optional caching so that if all environments\n                have been cycled through once, then no need to re-load and\n                instead, just the cache can be used.\n                '''\n\n                self.api_threads = []\n                self._load_envs(start_idx=0, in_order=True)\n\n                cnn_kwargs = {'num_classes': 191, 'pretrained': True}\n                self.cnn = MultitaskCNN(**cnn_kwargs)\n                self.cnn.eval()\n                self.cnn.cuda()\n\n            self.pos_queue = data[self.split + '_pos_queue']\n            self.boxes = data[self.split + '_boxes']\n\n            if max_actions:\n                for i in range(len(self.pos_queue)):\n                    self.pos_queue[i] = self.pos_queue[i][-1*max_actions:] \n\n        if input_type == 'pacman':\n\n            self.planner_actions = self.actions.clone().fill_(0)\n            self.controller_actions = self.actions.clone().fill_(-1)\n\n            self.planner_action_lengths = self.action_lengths.clone().fill_(0)\n            self.controller_action_lengths = self.action_lengths.clone().fill_(\n                0)\n\n            self.planner_hidden_idx = self.actions.clone().fill_(0)\n\n            self.planner_pos_queue_idx, self.controller_pos_queue_idx = [], []\n\n            # parsing flat actions to planner-controller hierarchy\n            for i in tqdm(range(len(self.actions))):\n\n                pa, ca, pq_idx, cq_idx, ph_idx = flat_to_hierarchical_actions(\n                    actions=self.actions[i][:self.action_lengths[i]+1],\n                    controller_action_lim=max_controller_actions)\n\n           
     self.planner_actions[i][:len(pa)] = torch.Tensor(pa)\n                self.controller_actions[i][:len(ca)] = torch.Tensor(ca)\n\n                self.planner_action_lengths[i] = len(pa)-1\n                self.controller_action_lengths[i] = len(ca)\n\n                self.planner_pos_queue_idx.append(pq_idx)\n                self.controller_pos_queue_idx.append(cq_idx)\n\n                self.planner_hidden_idx[i][:len(ca)] = torch.Tensor(ph_idx)\n\n    def _pick_envs_to_load(self,\n                           split='train',\n                           max_envs=10,\n                           start_idx=0,\n                           in_order=False):\n        if split in ['val', 'test'] or in_order == True:\n            pruned_env_set = self.env_set[start_idx:start_idx + max_envs]\n        else:\n            if max_envs < len(self.env_set):\n                env_inds = np.random.choice(\n                    len(self.env_set), max_envs, replace=False)\n            else:\n                env_inds = np.random.choice(\n                    len(self.env_set), max_envs, replace=True)\n            pruned_env_set = [self.env_set[x] for x in env_inds]\n        return pruned_env_set\n\n    def _load_envs(self, start_idx=-1, in_order=False):\n        #self._clear_memory()\n        if start_idx == -1:\n            start_idx = self.env_set.index(self.pruned_env_set[-1]) + 1\n\n        # Pick envs\n        self.pruned_env_set = self._pick_envs_to_load(\n            split=self.split,\n            max_envs=self.max_threads_per_gpu,\n            start_idx=start_idx,\n            in_order=in_order)\n\n        if len(self.pruned_env_set) == 0:\n            return\n\n        # Load api threads\n        start = time.time()\n        if len(self.api_threads) == 0:\n            for i in range(self.max_threads_per_gpu):\n                self.api_threads.append(\n                    objrender.RenderAPIThread(\n                        w=224, h=224, device=self.gpu_id))\n\n        try:\n            self.cfg = load_config('../House3D/tests/config.json')\n        except:\n            self.cfg = load_config('../../House3D/tests/config.json') #Sorry guys; this is so Lisa can run on her system; maybe we should make this an input somewhere?\n\n        print('[%.02f] Loaded %d api threads' % (time.time() - start,\n                                                 len(self.api_threads)))\n        start = time.time()\n\n        # Load houses\n        from multiprocessing import Pool\n        _args = ([h, self.cfg, self.map_resolution]\n                 for h in self.pruned_env_set)\n        with Pool(len(self.pruned_env_set)) as pool:\n            self.all_houses = pool.starmap(local_create_house, _args)\n\n        print('[%.02f] Loaded %d houses' % (time.time() - start,\n                                            len(self.all_houses)))\n        start = time.time()\n\n        # Load envs\n        self.env_loaded = {}\n        for i in range(len(self.all_houses)):\n            print('[%02d/%d][split:%s][gpu:%d][house:%s]' %\n                  (i + 1, len(self.all_houses), self.split, self.gpu_id,\n                   self.all_houses[i].house['id']))\n            environment = Environment(self.api_threads[i], self.all_houses[i], self.cfg)\n            self.env_loaded[self.all_houses[i].house['id']] = House3DUtils(\n                environment,\n                target_obj_conn_map_dir=self.target_obj_conn_map_dir,\n                build_graph=False)\n\n        # [TODO] Unused till now\n        self.env_ptr = -1\n\n        
print('[%.02f] Loaded %d house3d envs' % (time.time() - start,\n                                                  len(self.env_loaded)))\n\n        # Mark available data indices\n        self.available_idx = [\n            i for i, v in enumerate(self.env_list) if v in self.env_loaded\n        ]\n\n        # [TODO] only keeping legit sequences\n        # needed for things to play well with old data\n        temp_available_idx = self.available_idx.copy()\n        for i in range(len(temp_available_idx)):\n            if self.action_lengths[temp_available_idx[i]] < 5:\n                self.available_idx.remove(temp_available_idx[i])\n\n        print('Available inds: %d' % len(self.available_idx))\n\n        # Flag to check if loaded envs have been cycled through or not\n        # [TODO] Unused till now\n        self.all_envs_loaded = False\n\n    def _clear_api_threads(self):\n        for i in range(len(self.api_threads)):\n            del self.api_threads[0]\n        self.api_threads = []\n\n    def _clear_memory(self):\n        if hasattr(self, 'episode_house'):\n            del self.episode_house\n        if hasattr(self, 'env_loaded'):\n            del self.env_loaded\n        if hasattr(self, 'api_threads'):\n            del self.api_threads\n        self.api_threads = []\n\n    def _check_if_all_envs_loaded(self):\n        print('[CHECK][Cache:%d][Total:%d]' % (len(self.img_data_cache),\n                                               len(self.env_list)))\n        if len(self.img_data_cache) == len(self.env_list):\n            self.available_idx = [i for i, v in enumerate(self.env_list)]\n            return True\n        else:\n            return False\n\n    def set_camera(self, e, pos, robot_height=1.0):\n        assert len(pos) == 4\n\n        e.env.cam.pos.x = pos[0]\n        e.env.cam.pos.y = robot_height\n        e.env.cam.pos.z = pos[2]\n        e.env.cam.yaw = pos[3]\n\n        e.env.cam.updateDirection()\n\n    def render(self, e):\n        return e.env.render()\n\n    def get_frames(self, e, pos_queue, preprocess=True):\n        if isinstance(pos_queue, list) == False:\n            pos_queue = [pos_queue]\n\n        res = []\n        for i in range(len(pos_queue)):\n            self.set_camera(e, pos_queue[i])\n            img = np.array(self.render(e), copy=False, dtype=np.float32)\n\n            if preprocess == True:\n                img = img.transpose(2, 0, 1)\n                img = img / 255.0\n\n            res.append(img)\n\n        return np.array(res)\n\n    def get_hierarchical_features_till_spawn(self, actions, backtrack_steps=0, max_controller_actions=5):\n\n        action_length = len(actions)-1\n        pa, ca, pq_idx, cq_idx, ph_idx = flat_to_hierarchical_actions(\n            actions=actions,\n            controller_action_lim=max_controller_actions)\n\n        # count how many actions of the same type have been encountered before starting navigation\n        backtrack_controller_steps = actions[1:action_length - backtrack_steps + 1:][::-1]\n        counter = 0\n\n        if len(backtrack_controller_steps) > 0:\n            while (counter <= self.max_controller_actions) and (counter < len(backtrack_controller_steps) and (backtrack_controller_steps[counter] == backtrack_controller_steps[0])):\n                counter += 1\n\n        target_pos_idx = action_length - backtrack_steps\n\n        controller_step = True\n        if target_pos_idx in pq_idx:\n            controller_step = False\n\n        pq_idx_pruned = [v for v in pq_idx if v <= 
target_pos_idx]\n        pa_pruned = pa[:len(pq_idx_pruned)+1]\n\n        images = self.get_frames(\n            self.episode_house,\n            self.episode_pos_queue,\n            preprocess=True)\n        raw_img_feats = self.cnn(\n            Variable(torch.FloatTensor(images)\n                     .cuda())).data.cpu().numpy().copy()\n\n        controller_img_feat = torch.from_numpy(raw_img_feats[target_pos_idx].copy())\n        controller_action_in = pa_pruned[-1] - 2\n\n        planner_img_feats = torch.from_numpy(raw_img_feats[pq_idx_pruned].copy())\n        planner_actions_in = torch.from_numpy(np.array(pa_pruned[:-1]) - 1)\n\n        return planner_actions_in, planner_img_feats, controller_step, controller_action_in, \\\n            controller_img_feat, self.episode_pos_queue[target_pos_idx], counter\n\n    def __getitem__(self, index):\n        # [VQA] question-only\n        if self.input_type == 'ques':\n            idx = self.idx[index]\n            question = self.questions[index]\n            answer = self.answers[index]\n\n            return (idx, question, answer)\n\n        # [VQA] question+image\n        elif self.input_type == 'ques,image':\n            index = self.available_idx[index]\n\n            idx = self.idx[index]\n            question = self.questions[index]\n            answer = self.answers[index]\n\n            action_length = self.action_lengths[index]\n            actions = self.actions[index]\n\n            actions_in = actions[action_length - self.num_frames:action_length]\n            actions_out = actions[action_length - self.num_frames + 1:\n                                  action_length + 1]\n\n            if self.to_cache == True and index in self.img_data_cache:\n                images = self.img_data_cache[index]\n            else:\n                pos_queue = self.pos_queue[index][\n                    -self.num_frames:]  # last 5 frames\n                images = self.get_frames(\n                    self.env_loaded[self.env_list[index]],\n                    pos_queue,\n                    preprocess=True)\n                if self.to_cache == True:\n                    self.img_data_cache[index] = images.copy()\n\n            return (idx, question, answer, images, actions_in, actions_out,\n                    action_length)\n\n        # [NAV] question+cnn\n        elif self.input_type in ['cnn', 'cnn+q']:\n\n            index = self.available_idx[index]\n\n            idx = self.idx[index]\n            question = self.questions[index]\n            answer = self.answers[index]\n\n            action_length = self.action_lengths[index]\n            actions = self.actions[index]\n\n            if self.to_cache == True and index in self.img_data_cache:\n                img_feats = self.img_data_cache[index]\n            else:\n                pos_queue = self.pos_queue[index]\n                images = self.get_frames(\n                    self.env_loaded[self.env_list[index]],\n                    pos_queue,\n                    preprocess=True)\n                img_feats = self.cnn(\n                    Variable(torch.FloatTensor(images)\n                             .cuda())).data.cpu().numpy().copy()\n                if self.to_cache == True:\n                    self.img_data_cache[index] = img_feats\n\n            # for val or test (evaluation), or\n            # when target_obj_conn_map_dir is defined (reinforce),\n            # load entire shortest path navigation trajectory\n            # and load connectivity map for intermediate rewards\n 
           if self.split in ['val', 'test'\n                              ] or self.target_obj_conn_map_dir != False:\n                target_obj_id, target_room = False, False\n                bbox_obj = [\n                    x for x in self.boxes[index]\n                    if x['type'] == 'object' and x['target'] == True\n                ][0]['box']\n                for obj_id in self.env_loaded[self.env_list[index]].objects:\n                    box2 = self.env_loaded[self.env_list[index]].objects[\n                        obj_id]['bbox']\n                    if all([bbox_obj['min'][x] == box2['min'][x] for x in range(3)]) == True and \\\n                        all([bbox_obj['max'][x] == box2['max'][x] for x in range(3)]) == True:\n                        target_obj_id = obj_id\n                        break\n                bbox_room = [\n                    x for x in self.boxes[index]\n                    if x['type'] == 'room' and x['target'] == False\n                ][0]\n                for room in self.env_loaded[self.env_list[\n                        index]].env.house.all_rooms:\n                    if all([room['bbox']['min'][i] == bbox_room['box']['min'][i] for i in range(3)]) and \\\n                        all([room['bbox']['max'][i] == bbox_room['box']['max'][i] for i in range(3)]):\n                        target_room = room\n                        break\n                assert target_obj_id != False\n                assert target_room != False\n                self.env_loaded[self.env_list[index]].set_target_object(\n                    self.env_loaded[self.env_list[index]].objects[\n                        target_obj_id], target_room)\n\n                # [NOTE] only works for batch size = 1\n                self.episode_pos_queue = self.pos_queue[index]\n                self.episode_house = self.env_loaded[self.env_list[index]]\n                self.target_room = target_room\n                self.target_obj = self.env_loaded[self.env_list[\n                    index]].objects[target_obj_id]\n\n                actions_in = actions[:action_length]\n                actions_out = actions[1:action_length + 1] - 2\n\n                return (idx, question, answer, img_feats, actions_in,\n                        actions_out, action_length)\n\n            # if action_length is n\n            # images.shape[0] is also n\n            # actions[0] is <START>\n            # actions[n] is <END>\n\n            # grab 5 random frames\n            # [NOTE]: this'll break for longer-than-5 navigation sequences\n            start_idx = np.random.choice(img_feats.shape[0] + 1 -\n                                         self.num_frames)\n            img_feats = img_feats[start_idx:start_idx + self.num_frames]\n\n            actions_in = actions[start_idx:start_idx + self.num_frames]\n            actions_out = actions[start_idx + self.num_frames] - 2\n\n            return (idx, question, answer, img_feats, actions_in, actions_out,\n                    action_length)\n\n        # [NAV] question+lstm\n        elif self.input_type in ['lstm', 'lstm+q']:\n\n            index = self.available_idx[index]\n\n            idx = self.idx[index]\n            question = self.questions[index]\n            answer = self.answers[index]\n\n            action_length = self.action_lengths[index]\n            actions = self.actions[index]\n\n            if self.split == 'train':\n                if self.to_cache == True and index in self.img_data_cache:\n                    img_feats = 
self.img_data_cache[index]\n                else:\n                    pos_queue = self.pos_queue[index]\n                    images = self.get_frames(\n                        self.env_loaded[self.env_list[index]],\n                        pos_queue,\n                        preprocess=True)\n                    raw_img_feats = self.cnn(\n                        Variable(torch.FloatTensor(images)\n                                 .cuda())).data.cpu().numpy().copy()\n                    img_feats = np.zeros(\n                        (self.actions.shape[1], raw_img_feats.shape[1]),\n                        dtype=np.float32)\n                    img_feats[:raw_img_feats.shape[\n                        0], :] = raw_img_feats.copy()\n                    if self.to_cache == True:\n                        self.img_data_cache[index] = img_feats\n\n            actions_in = actions.clone() - 1\n            actions_out = actions[1:].clone() - 2\n\n            actions_in[action_length:].fill_(0)\n            mask = actions_out.clone().gt(-1)\n            if len(actions_out) > action_length:\n                actions_out[action_length:].fill_(0)\n\n            # for val or test (evaluation), or\n            # when target_obj_conn_map_dir is defined (reinforce),\n            # load entire shortest path navigation trajectory\n            # and load connectivity map for intermediate rewards\n            if self.split in ['val', 'test'\n                              ] or self.target_obj_conn_map_dir != False:\n                target_obj_id, target_room = False, False\n                bbox_obj = [\n                    x for x in self.boxes[index]\n                    if x['type'] == 'object' and x['target'] == True\n                ][0]['box']\n                for obj_id in self.env_loaded[self.env_list[index]].objects:\n                    box2 = self.env_loaded[self.env_list[index]].objects[\n                        obj_id]['bbox']\n                    if all([bbox_obj['min'][x] == box2['min'][x] for x in range(3)]) == True and \\\n                        all([bbox_obj['max'][x] == box2['max'][x] for x in range(3)]) == True:\n                        target_obj_id = obj_id\n                        break\n                bbox_room = [\n                    x for x in self.boxes[index]\n                    if x['type'] == 'room' and x['target'] == False\n                ][0]\n                for room in self.env_loaded[self.env_list[\n                        index]].env.house.all_rooms:\n                    if all([room['bbox']['min'][i] == bbox_room['box']['min'][i] for i in range(3)]) and \\\n                        all([room['bbox']['max'][i] == bbox_room['box']['max'][i] for i in range(3)]):\n                        target_room = room\n                        break\n                assert target_obj_id != False\n                assert target_room != False\n                self.env_loaded[self.env_list[index]].set_target_object(\n                    self.env_loaded[self.env_list[index]].objects[\n                        target_obj_id], target_room)\n\n                # [NOTE] only works for batch size = 1\n                self.episode_pos_queue = self.pos_queue[index]\n                self.episode_house = self.env_loaded[self.env_list[index]]\n                self.target_room = target_room\n                self.target_obj = self.env_loaded[self.env_list[\n                    index]].objects[target_obj_id]\n\n                return (idx, question, answer, False, actions_in, actions_out,\n                    
    action_length, mask)\n\n            return (idx, question, answer, img_feats, actions_in, actions_out,\n                    action_length, mask)\n\n        # [NAV] planner-controller\n        elif self.input_type in ['pacman']:\n\n            index = self.available_idx[index]\n\n            idx = self.idx[index]\n            question = self.questions[index]\n            answer = self.answers[index]\n\n            action_length = self.action_lengths[index]\n            actions = self.actions[index]\n\n            planner_actions = self.planner_actions[index]\n            controller_actions = self.controller_actions[index]\n\n            planner_action_length = self.planner_action_lengths[index]\n            controller_action_length = self.controller_action_lengths[index]\n\n            planner_hidden_idx = self.planner_hidden_idx[index]\n\n            if self.split == 'train':\n                if self.to_cache == True and index in self.img_data_cache:\n                    img_feats = self.img_data_cache[index]\n                else:\n                    pos_queue = self.pos_queue[index]\n                    images = self.get_frames(\n                        self.env_loaded[self.env_list[index]],\n                        pos_queue,\n                        preprocess=True)\n                    raw_img_feats = self.cnn(\n                        Variable(torch.FloatTensor(images)\n                                 .cuda())).data.cpu().numpy().copy()\n                    img_feats = np.zeros(\n                        (self.actions.shape[1], raw_img_feats.shape[1]),\n                        dtype=np.float32)\n                    img_feats[:raw_img_feats.shape[\n                        0], :] = raw_img_feats.copy()\n                    if self.to_cache == True:\n                        self.img_data_cache[index] = img_feats\n\n            if self.split in ['val', 'test'\n                              ] or self.target_obj_conn_map_dir != False:\n                target_obj_id, target_room = False, False\n                bbox_obj = [\n                    x for x in self.boxes[index]\n                    if x['type'] == 'object' and x['target'] == True\n                ][0]['box']\n                for obj_id in self.env_loaded[self.env_list[index]].objects:\n                    box2 = self.env_loaded[self.env_list[index]].objects[\n                        obj_id]['bbox']\n                    if all([bbox_obj['min'][x] == box2['min'][x] for x in range(3)]) == True and \\\n                        all([bbox_obj['max'][x] == box2['max'][x] for x in range(3)]) == True:\n                        target_obj_id = obj_id\n                        break\n                bbox_room = [\n                    x for x in self.boxes[index]\n                    if x['type'] == 'room' and x['target'] == False\n                ][0]\n                for room in self.env_loaded[self.env_list[\n                        index]].env.house.all_rooms:\n                    if all([room['bbox']['min'][i] == bbox_room['box']['min'][i] for i in range(3)]) and \\\n                        all([room['bbox']['max'][i] == bbox_room['box']['max'][i] for i in range(3)]):\n                        target_room = room\n                        break\n                assert target_obj_id != False\n                assert target_room != False\n                self.env_loaded[self.env_list[index]].set_target_object(\n                    self.env_loaded[self.env_list[index]].objects[\n                        target_obj_id], target_room)\n\n        
        # [NOTE] only works for batch size = 1\n                self.episode_pos_queue = self.pos_queue[index]\n                self.episode_house = self.env_loaded[self.env_list[index]]\n                self.target_room = target_room\n                self.target_obj = self.env_loaded[self.env_list[\n                    index]].objects[target_obj_id]\n\n                return (idx, question, answer, actions, action_length)\n\n            planner_pos_queue_idx = self.planner_pos_queue_idx[index]\n            controller_pos_queue_idx = self.controller_pos_queue_idx[index]\n\n            planner_img_feats = np.zeros(\n                (self.actions.shape[1], img_feats.shape[1]), dtype=np.float32)\n            planner_img_feats[:planner_action_length] = img_feats[\n                planner_pos_queue_idx]\n\n            planner_actions_in = planner_actions.clone() - 1\n            planner_actions_out = planner_actions[1:].clone() - 2\n\n            planner_actions_in[planner_action_length:].fill_(0)\n            planner_mask = planner_actions_out.clone().gt(-1)\n            if len(planner_actions_out) > planner_action_length:\n                planner_actions_out[planner_action_length:].fill_(0)\n\n            controller_img_feats = np.zeros(\n                (self.actions.shape[1], img_feats.shape[1]), dtype=np.float32)\n            controller_img_feats[:controller_action_length] = img_feats[\n                controller_pos_queue_idx]\n\n            controller_actions_in = actions[1:].clone() - 2\n            if len(controller_actions_in) > controller_action_length:\n                controller_actions_in[controller_action_length:].fill_(0)\n\n            controller_out = controller_actions\n            controller_mask = controller_out.clone().gt(-1)\n            if len(controller_out) > controller_action_length:\n                controller_out[controller_action_length:].fill_(0)\n\n            # zero out forced controller return\n            for i in range(controller_action_length):\n                if i >= self.max_controller_actions - 1 and controller_out[i] == 0 and \\\n                        (self.max_controller_actions == 1 or\n                         controller_out[i - self.max_controller_actions + 1:i].sum()\n                         == self.max_controller_actions - 1):\n                    controller_mask[i] = 0\n                    \n            return (idx, question, answer, planner_img_feats,\n                    planner_actions_in, planner_actions_out,\n                    planner_action_length, planner_mask, controller_img_feats,\n                    controller_actions_in, planner_hidden_idx, controller_out,\n                    controller_action_length, controller_mask)\n\n    def __len__(self):\n        if self.input_type == 'ques':\n            return len(self.questions)\n        else:\n            return len(self.available_idx)\n\n\nclass EqaDataLoader(DataLoader):\n    def __init__(self, **kwargs):\n        if 'questions_h5' not in kwargs:\n            raise ValueError('Must give questions_h5')\n        if 'data_json' not in kwargs:\n            raise ValueError('Must give data_json')\n        if 'vocab' not in kwargs:\n            raise ValueError('Must give vocab')\n        if 'input_type' not in kwargs:\n            raise ValueError('Must give input_type')\n        if 'split' not in kwargs:\n            raise ValueError('Must give split')\n        if 'gpu_id' not in kwargs:\n            raise ValueError('Must give gpu_id')\n\n        questions_h5_path = 
kwargs.pop('questions_h5')\n        data_json = kwargs.pop('data_json')\n        input_type = kwargs.pop('input_type')\n\n        split = kwargs.pop('split')\n        vocab = kwargs.pop('vocab')\n\n        gpu_id = kwargs.pop('gpu_id')\n\n        if 'max_threads_per_gpu' in kwargs:\n            max_threads_per_gpu = kwargs.pop('max_threads_per_gpu')\n        else:\n            max_threads_per_gpu = 10\n\n        if 'to_cache' in kwargs:\n            to_cache = kwargs.pop('to_cache')\n        else:\n            to_cache = False\n\n        if 'target_obj_conn_map_dir' in kwargs:\n            target_obj_conn_map_dir = kwargs.pop('target_obj_conn_map_dir')\n        else:\n            target_obj_conn_map_dir = False\n\n        if 'map_resolution' in kwargs:\n            map_resolution = kwargs.pop('map_resolution')\n        else:\n            map_resolution = 1000\n\n        if 'image' in input_type or 'cnn' in input_type:\n            kwargs['collate_fn'] = eqaCollateCnn\n        elif 'lstm' in input_type:\n            kwargs['collate_fn'] = eqaCollateSeq2seq\n\n        if 'overfit' in kwargs:\n            overfit = kwargs.pop('overfit')\n        else:\n            overfit = False\n\n        if 'max_controller_actions' in kwargs:\n            max_controller_actions = kwargs.pop('max_controller_actions')\n        else:\n            max_controller_actions = 5\n\n        if 'max_actions' in kwargs:\n            max_actions = kwargs.pop('max_actions')\n        else:\n            max_actions = None \n\n        print('Reading questions from ', questions_h5_path)\n        with h5py.File(questions_h5_path, 'r') as questions_h5:\n            self.dataset = EqaDataset(\n                questions_h5,\n                vocab,\n                num_frames=kwargs.pop('num_frames'),\n                data_json=data_json,\n                split=split,\n                gpu_id=gpu_id,\n                input_type=input_type,\n                max_threads_per_gpu=max_threads_per_gpu,\n                to_cache=to_cache,\n                target_obj_conn_map_dir=target_obj_conn_map_dir,\n                map_resolution=map_resolution,\n                overfit=overfit,\n                max_controller_actions=max_controller_actions,\n                max_actions=max_actions)\n\n        super(EqaDataLoader, self).__init__(self.dataset, **kwargs)\n\n    def close(self):\n        pass\n\n    def __enter__(self):\n        return self\n\n    def __exit__(self, exc_type, exc_value, traceback):\n        self.close()\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('-train_h5', default='data/04_22/train_v1.h5')\n    parser.add_argument('-val_h5', default='data/04_22/val_v1.h5')\n    parser.add_argument('-data_json', default='data/04_22/data_v1.json')\n    parser.add_argument('-vocab_json', default='data/04_22/vocab_v1.json')\n\n    parser.add_argument(\n        '-input_type', default='ques', choices=['ques', 'ques,image'])\n    parser.add_argument(\n        '-num_frames', default=5,\n        type=int)  # -1 = all frames of navigation sequence\n\n    parser.add_argument('-batch_size', default=50, type=int)\n    parser.add_argument('-max_threads_per_gpu', default=10, type=int)\n\n    args = parser.parse_args()\n\n    try:\n        args.gpus = os.environ['CUDA_VISIBLE_DEVICES'].split(',')\n        args.gpus = [int(x) for x in args.gpus]\n    except KeyError:\n        print(\"CPU not supported\")\n        exit()\n\n    train_loader_kwargs = {\n        'questions_h5': args.train_h5,\n      
  'data_json': args.data_json,\n        'vocab': args.vocab_json,\n        'batch_size': args.batch_size,\n        'input_type': args.input_type,\n        'num_frames': args.num_frames,\n        'split': 'train',\n        'max_threads_per_gpu': args.max_threads_per_gpu,\n        'gpu_id': args.gpus[0],\n        'to_cache': False,\n    }\n\n    train_loader = EqaDataLoader(**train_loader_kwargs)\n    train_loader.dataset._load_envs(start_idx=0, in_order=True)\n    t = 0\n\n    while True:\n        done = False\n        all_envs_loaded = train_loader.dataset._check_if_all_envs_loaded()\n        while done == False:\n            print('[Size:%d][t:%d][Cache:%d]' %\n                  (len(train_loader.dataset), t,\n                   len(train_loader.dataset.img_data_cache)))\n            for batch in train_loader:\n                t += 1\n\n            if all_envs_loaded == False:\n                train_loader.dataset._load_envs(in_order=True)\n                if len(train_loader.dataset.pruned_env_set) == 0:\n                    done = True\n            else:\n                done = True\n"
  },
  {
    "path": "training/metrics.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates.\n# All rights reserved.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport pdb\n\nimport copy\nimport json\nimport time\nimport os, sys\nimport argparse\nimport numpy as np\n\nclass Metric():\n    def __init__(self, info={}, metric_names=[], log_json=None):\n        self.info = info\n        self.metric_names = metric_names\n\n        self.metrics = [[None,None,None] for _ in self.metric_names]\n\n        self.stats = []\n        self.num_iters = 0\n\n        self.log_json = log_json\n\n    def update(self, values):\n        assert isinstance(values, list)\n\n        self.num_iters += 1\n        current_stats = []\n\n        for i in range(len(values)):\n            if values[i] is None:\n                continue\n\n            if isinstance(values[i], list) == False:\n                values[i] = [values[i]]\n\n            if self.metrics[i][0] == None:\n                self.metrics[i][0] = np.mean(values[i])\n                self.metrics[i][1] = np.mean(values[i])\n                self.metrics[i][2] = np.mean(values[i])\n            else:\n                self.metrics[i][0] = (self.metrics[i][0] * (self.num_iters - 1) + np.mean(values[i])) / self.num_iters\n                self.metrics[i][1] = 0.95 * self.metrics[i][1] + 0.05 * np.mean(values[i])\n                self.metrics[i][2] = np.mean(values[i])\n\n            self.metrics[i][0] = float(self.metrics[i][0])\n            self.metrics[i][1] = float(self.metrics[i][1])\n            self.metrics[i][2] = float(self.metrics[i][2])\n\n            current_stats.append(self.metrics[i])\n\n        self.stats.append(copy.deepcopy(current_stats))\n\n    def get_stat_string(self, mode=1):\n\n        stat_string = ''\n\n        for k, v in self.info.items():\n            stat_string += '[%s:%s]' % (k, v)\n\n        stat_string += '[iters:%d]' % self.num_iters\n\n        for i in range(len(self.metric_names)):\n            stat_string += '[%s:%.05f]' % (self.metric_names[i], self.metrics[i][mode])\n\n        return stat_string\n\n    def dump_log(self):\n\n        if self.log_json == None:\n            return False\n\n        dict_to_save = {\n            'metric_names': self.metric_names,\n            'stats': self.stats\n        }\n\n        json.dump(dict_to_save, open(self.log_json, 'w'))\n\n        return True\n\nclass VqaMetric(Metric):\n    def __init__(self, info={}, metric_names=[], log_json=None):\n        Metric.__init__(self, info, metric_names, log_json)\n\n    def compute_ranks(self, scores, labels):\n        accuracy = np.zeros(len(labels))\n        ranks = np.full(len(labels), scores.shape[1])\n\n        for i in range(scores.shape[0]):\n            ranks[i] = scores[i].gt(scores[i][labels[i]]).sum() + 1\n            if ranks[i] == 1:\n                accuracy[i] = 1\n\n        return accuracy, ranks\n\nclass NavMetric(Metric):\n    def __init__(self, info={}, metric_names=[], log_json=None):\n        Metric.__init__(self, info, metric_names, log_json)\n"
  },
  {
    "path": "training/models.py",
    "content": "# Model defs for navigation and question answering\n# Navigation: CNN, LSTM, Planner-controller\n# VQA: question-only, 5-frame + attention\n\nimport time\nimport h5py\nimport math\nimport argparse\nimport numpy as np\nimport os, sys, json\n\nimport torch\nimport torchvision\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\n\nimport pdb\n\n\ndef build_mlp(input_dim,\n              hidden_dims,\n              output_dim,\n              use_batchnorm=False,\n              dropout=0,\n              add_sigmoid=1):\n    layers = []\n    D = input_dim\n    if dropout > 0:\n        layers.append(nn.Dropout(p=dropout))\n    if use_batchnorm:\n        layers.append(nn.BatchNorm1d(input_dim))\n    for dim in hidden_dims:\n        layers.append(nn.Linear(D, dim))\n        if use_batchnorm:\n            layers.append(nn.BatchNorm1d(dim))\n        if dropout > 0:\n            layers.append(nn.Dropout(p=dropout))\n        layers.append(nn.ReLU(inplace=True))\n        D = dim\n    layers.append(nn.Linear(D, output_dim))\n\n    if add_sigmoid == 1:\n        layers.append(nn.Sigmoid())\n\n    return nn.Sequential(*layers)\n\n\ndef get_state(m):\n    if m is None:\n        return None\n    state = {}\n    for k, v in m.state_dict().items():\n        state[k] = v.clone()\n    return state\n\n\ndef repackage_hidden(h, batch_size):\n    # wraps hidden states in new Variables, to detach them from their history\n    if type(h) == Variable:\n        return Variable(\n            h.data.resize_(h.size(0), batch_size, h.size(2)).zero_())\n    else:\n        return tuple(repackage_hidden(v, batch_size) for v in h)\n\n\ndef ensure_shared_grads(model, shared_model):\n    for param, shared_param in zip(model.parameters(),\n                                   shared_model.parameters()):\n        if shared_param.grad is not None:\n            return\n        shared_param._grad = param.grad\n\n\nclass MaskedNLLCriterion(nn.Module):\n    def __init__(self):\n        super(MaskedNLLCriterion, self).__init__()\n\n    def forward(self, input, target, mask):\n\n        logprob_select = torch.gather(input, 1, target)\n\n        out = torch.masked_select(logprob_select, mask)\n\n        loss = -torch.sum(out) / mask.float().sum()\n        return loss\n\nclass MultitaskCNNOutput(nn.Module):\n    def __init__(\n            self,\n            num_classes=191,\n            pretrained=True,\n            checkpoint_path='models/03_13_h3d_hybrid_cnn.pt'\n    ):\n        super(MultitaskCNNOutput, self).__init__()\n\n        self.num_classes = num_classes\n        self.conv_block1 = nn.Sequential(\n            nn.Conv2d(3, 8, 5),\n            nn.BatchNorm2d(8),\n            nn.ReLU(inplace=True),\n            nn.MaxPool2d(2, 2))\n        self.conv_block2 = nn.Sequential(\n            nn.Conv2d(8, 16, 5),\n            nn.BatchNorm2d(16),\n            nn.ReLU(inplace=True),\n            nn.MaxPool2d(2, 2))\n        self.conv_block3 = nn.Sequential(\n            nn.Conv2d(16, 32, 5),\n            nn.BatchNorm2d(32),\n            nn.ReLU(inplace=True),\n            nn.MaxPool2d(2, 2))\n        self.conv_block4 = nn.Sequential(\n            nn.Conv2d(32, 32, 5),\n            nn.BatchNorm2d(32),\n            nn.ReLU(inplace=True),\n            nn.MaxPool2d(2, 2))\n        self.classifier = nn.Sequential(\n            nn.Conv2d(32, 512, 5),\n            nn.BatchNorm2d(512),\n            nn.ReLU(inplace=True),\n    
        nn.Dropout2d(),\n            nn.Conv2d(512, 512, 1),\n            nn.BatchNorm2d(512),\n            nn.ReLU(inplace=True),\n            nn.Dropout2d())\n\n        self.encoder_seg = nn.Conv2d(512, self.num_classes, 1)\n        self.encoder_depth = nn.Conv2d(512, 1, 1)\n        self.encoder_ae = nn.Conv2d(512, 3, 1)\n\n        self.score_pool2_seg = nn.Conv2d(16, self.num_classes, 1)\n        self.score_pool3_seg = nn.Conv2d(32, self.num_classes, 1)\n\n        self.score_pool2_depth = nn.Conv2d(16, 1, 1)\n        self.score_pool3_depth = nn.Conv2d(32, 1, 1)\n\n        self.score_pool2_ae = nn.Conv2d(16, 3, 1)\n        self.score_pool3_ae = nn.Conv2d(32, 3, 1)\n\n        self.pretrained = pretrained\n        if self.pretrained == True:\n            print('Loading CNN weights from %s' % checkpoint_path)\n            checkpoint = torch.load(\n                checkpoint_path, map_location={'cuda:0': 'cpu'})\n            self.load_state_dict(checkpoint['model_state'])\n            for param in self.parameters():\n                param.requires_grad = False\n        else:\n            for m in self.modules():\n                if isinstance(m, nn.Conv2d):\n                    n = m.kernel_size[0] * m.kernel_size[1] * (\n                        m.out_channels + m.in_channels)\n                    m.weight.data.normal_(0, math.sqrt(2. / n))\n                elif isinstance(m, nn.BatchNorm2d):\n                    m.weight.data.fill_(1)\n                    m.bias.data.zero_()\n\n    def forward(self, x):\n\n        conv1 = self.conv_block1(x)\n        conv2 = self.conv_block2(conv1)\n        conv3 = self.conv_block3(conv2)\n        conv4 = self.conv_block4(conv3)\n\n        encoder_output = self.classifier(conv4)\n\n        encoder_output_seg = self.encoder_seg(encoder_output)\n        encoder_output_depth = self.encoder_depth(encoder_output)\n        encoder_output_ae = self.encoder_ae(encoder_output)\n\n        score_pool2_seg = self.score_pool2_seg(conv2)\n        score_pool3_seg = self.score_pool3_seg(conv3)\n\n        score_pool2_depth = self.score_pool2_depth(conv2)\n        score_pool3_depth = self.score_pool3_depth(conv3)\n\n        score_pool2_ae = self.score_pool2_ae(conv2)\n        score_pool3_ae = self.score_pool3_ae(conv3)\n\n        score_seg = F.upsample(encoder_output_seg, score_pool3_seg.size()[2:], mode='bilinear')\n        score_seg += score_pool3_seg\n        score_seg = F.upsample(score_seg, score_pool2_seg.size()[2:], mode='bilinear')\n        score_seg += score_pool2_seg\n        out_seg = F.upsample(score_seg, x.size()[2:], mode='bilinear')\n\n        score_depth = F.upsample(encoder_output_depth, score_pool3_depth.size()[2:], mode='bilinear')\n        score_depth += score_pool3_depth\n        score_depth = F.upsample(score_depth, score_pool2_depth.size()[2:], mode='bilinear')\n        score_depth += score_pool2_depth\n        out_depth = F.sigmoid(F.upsample(score_depth, x.size()[2:], mode='bilinear'))\n\n        score_ae = F.upsample(encoder_output_ae, score_pool3_ae.size()[2:], mode='bilinear')\n        score_ae += score_pool3_ae\n        score_ae = F.upsample(score_ae, score_pool2_ae.size()[2:], mode='bilinear')\n        score_ae += score_pool2_ae\n        out_ae = F.sigmoid(F.upsample(score_ae, x.size()[2:], mode='bilinear'))\n\n        return out_seg, out_depth, out_ae\n\nclass MultitaskCNN(nn.Module):\n    def __init__(\n            self,\n            num_classes=191,\n            pretrained=True,\n            checkpoint_path='models/03_13_h3d_hybrid_cnn.pt'\n  
  ):\n        super(MultitaskCNN, self).__init__()\n\n        self.num_classes = num_classes\n        self.conv_block1 = nn.Sequential(\n            nn.Conv2d(3, 8, 5),\n            nn.BatchNorm2d(8),\n            nn.ReLU(inplace=True),\n            nn.MaxPool2d(2, 2))\n        self.conv_block2 = nn.Sequential(\n            nn.Conv2d(8, 16, 5),\n            nn.BatchNorm2d(16),\n            nn.ReLU(inplace=True),\n            nn.MaxPool2d(2, 2))\n        self.conv_block3 = nn.Sequential(\n            nn.Conv2d(16, 32, 5),\n            nn.BatchNorm2d(32),\n            nn.ReLU(inplace=True),\n            nn.MaxPool2d(2, 2))\n        self.conv_block4 = nn.Sequential(\n            nn.Conv2d(32, 32, 5),\n            nn.BatchNorm2d(32),\n            nn.ReLU(inplace=True),\n            nn.MaxPool2d(2, 2))\n        self.classifier = nn.Sequential(\n            nn.Conv2d(32, 512, 5),\n            nn.BatchNorm2d(512),\n            nn.ReLU(inplace=True),\n            nn.Dropout2d(),\n            nn.Conv2d(512, 512, 1),\n            nn.BatchNorm2d(512),\n            nn.ReLU(inplace=True),\n            nn.Dropout2d())\n\n        self.encoder_seg = nn.Conv2d(512, self.num_classes, 1)\n        self.encoder_depth = nn.Conv2d(512, 1, 1)\n        self.encoder_ae = nn.Conv2d(512, 3, 1)\n\n        self.score_pool2_seg = nn.Conv2d(16, self.num_classes, 1)\n        self.score_pool3_seg = nn.Conv2d(32, self.num_classes, 1)\n\n        self.score_pool2_depth = nn.Conv2d(16, 1, 1)\n        self.score_pool3_depth = nn.Conv2d(32, 1, 1)\n\n        self.score_pool2_ae = nn.Conv2d(16, 3, 1)\n        self.score_pool3_ae = nn.Conv2d(32, 3, 1)\n\n        self.pretrained = pretrained\n        if self.pretrained == True:\n            print('Loading CNN weights from %s' % checkpoint_path)\n            checkpoint = torch.load(\n                checkpoint_path, map_location={'cuda:0': 'cpu'})\n            self.load_state_dict(checkpoint['model_state'])\n            for param in self.parameters():\n                param.requires_grad = False\n        else:\n            for m in self.modules():\n                if isinstance(m, nn.Conv2d):\n                    n = m.kernel_size[0] * m.kernel_size[1] * (\n                        m.out_channels + m.in_channels)\n                    m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n                elif isinstance(m, nn.BatchNorm2d):\n                    m.weight.data.fill_(1)\n                    m.bias.data.zero_()\n\n    def forward(self, x):\n\n        assert self.training == False\n\n        conv1 = self.conv_block1(x)\n        conv2 = self.conv_block2(conv1)\n        conv3 = self.conv_block3(conv2)\n        conv4 = self.conv_block4(conv3)\n\n        return conv4.view(-1, 32 * 10 * 10)\n\n        # encoder_output = self.classifier(conv4)\n\n        # encoder_output_seg = self.encoder_seg(encoder_output)\n        # encoder_output_depth = self.encoder_depth(encoder_output)\n        # encoder_output_ae = self.encoder_ae(encoder_output)\n\n        # score_pool2_seg = self.score_pool2_seg(conv2)\n        # score_pool3_seg = self.score_pool3_seg(conv3)\n\n        # score_pool2_depth = self.score_pool2_depth(conv2)\n        # score_pool3_depth = self.score_pool3_depth(conv3)\n\n        # score_pool2_ae = self.score_pool2_ae(conv2)\n        # score_pool3_ae = self.score_pool3_ae(conv3)\n\n        # score_seg = F.upsample(encoder_output_seg, score_pool3_seg.size()[2:], mode='bilinear')\n        # score_seg += score_pool3_seg\n        # score_seg = F.upsample(score_seg, score_pool2_seg.size()[2:], mode='bilinear')\n        # score_seg += score_pool2_seg\n        # out_seg = F.upsample(score_seg, x.size()[2:], mode='bilinear')\n\n        # score_depth = F.upsample(encoder_output_depth, score_pool3_depth.size()[2:], mode='bilinear')\n        # score_depth += score_pool3_depth\n        # score_depth = F.upsample(score_depth, score_pool2_depth.size()[2:], mode='bilinear')\n        # score_depth += score_pool2_depth\n        # out_depth = F.sigmoid(F.upsample(score_depth, x.size()[2:], mode='bilinear'))\n\n        # score_ae = F.upsample(encoder_output_ae, score_pool3_ae.size()[2:], mode='bilinear')\n        # score_ae += score_pool3_ae\n        # score_ae = F.upsample(score_ae, score_pool2_ae.size()[2:], mode='bilinear')\n        # score_ae += score_pool2_ae\n        # out_ae = F.sigmoid(F.upsample(score_ae, x.size()[2:], mode='bilinear'))\n\n        # return out_seg, out_depth, out_ae\n\n\nclass QuestionLstmEncoder(nn.Module):\n    def __init__(self,\n                 token_to_idx,\n                 wordvec_dim=64,\n                 rnn_dim=64,\n                 rnn_num_layers=2,\n                 rnn_dropout=0):\n        super(QuestionLstmEncoder, self).__init__()\n        self.token_to_idx = token_to_idx\n        self.NULL = token_to_idx['<NULL>']\n        self.START = token_to_idx['<START>']\n        self.END = token_to_idx['<END>']\n\n        self.embed = nn.Embedding(len(token_to_idx), wordvec_dim)\n        self.rnn = nn.LSTM(\n            wordvec_dim,\n            rnn_dim,\n            rnn_num_layers,\n            dropout=rnn_dropout,\n            batch_first=True)\n\n        self.init_weights()\n\n    def init_weights(self):\n        initrange = 0.1\n        self.embed.weight.data.uniform_(-initrange, initrange)\n\n    def forward(self, x):\n        N, T = x.size()\n        idx = torch.LongTensor(N).fill_(T - 1)\n\n        # Find the last non-null element in each sequence\n        x_cpu = x.data.cpu()\n        for i in range(N):\n            for t in range(T - 1):\n                if x_cpu[i, t] != self.NULL and x_cpu[i, t + 1] == self.NULL:\n                    idx[i] = t\n                    break\n        idx = idx.type_as(x.data).long()\n        idx = Variable(idx, requires_grad=False)\n\n        hs, _ = self.rnn(self.embed(x))\n\n        idx = idx.view(N, 
1, 1).expand(N, 1, hs.size(2))\n        H = hs.size(2)\n        return hs.gather(1, idx).view(N, H)\n\n\n# ----------- VQA -----------\n\n\nclass VqaLstmModel(nn.Module):\n    def __init__(self,\n                 vocab,\n                 rnn_wordvec_dim=64,\n                 rnn_dim=64,\n                 rnn_num_layers=2,\n                 rnn_dropout=0.5,\n                 fc_use_batchnorm=False,\n                 fc_dropout=0.5,\n                 fc_dims=(64, )):\n        super(VqaLstmModel, self).__init__()\n        rnn_kwargs = {\n            'token_to_idx': vocab['questionTokenToIdx'],\n            'wordvec_dim': rnn_wordvec_dim,\n            'rnn_dim': rnn_dim,\n            'rnn_num_layers': rnn_num_layers,\n            'rnn_dropout': rnn_dropout,\n        }\n        self.rnn = QuestionLstmEncoder(**rnn_kwargs)\n\n        classifier_kwargs = {\n            'input_dim': rnn_dim,\n            'hidden_dims': fc_dims,\n            'output_dim': len(vocab['answerTokenToIdx']),\n            'use_batchnorm': fc_use_batchnorm,\n            'dropout': fc_dropout,\n            'add_sigmoid': 0\n        }\n        self.classifier = build_mlp(**classifier_kwargs)\n\n    def forward(self, questions):\n        q_feats = self.rnn(questions)\n        scores = self.classifier(q_feats)\n        return scores\n\n\nclass VqaLstmCnnAttentionModel(nn.Module):\n    def __init__(self,\n                 vocab,\n                 image_feat_dim=64,\n                 question_wordvec_dim=64,\n                 question_hidden_dim=64,\n                 question_num_layers=2,\n                 question_dropout=0.5,\n                 fc_use_batchnorm=False,\n                 fc_dropout=0.5,\n                 fc_dims=(64, )):\n        super(VqaLstmCnnAttentionModel, self).__init__()\n\n        cnn_kwargs = {'num_classes': 191, 'pretrained': True}\n        self.cnn = MultitaskCNN(**cnn_kwargs)\n        self.cnn_fc_layer = nn.Sequential(\n            nn.Linear(32 * 10 * 10, 64), nn.ReLU(), nn.Dropout(p=0.5))\n\n        q_rnn_kwargs = {\n            'token_to_idx': vocab['questionTokenToIdx'],\n            'wordvec_dim': question_wordvec_dim,\n            'rnn_dim': question_hidden_dim,\n            'rnn_num_layers': question_num_layers,\n            'rnn_dropout': question_dropout,\n        }\n        self.q_rnn = QuestionLstmEncoder(**q_rnn_kwargs)\n\n        self.img_tr = nn.Sequential(nn.Linear(64, 64), nn.Dropout(p=0.5))\n\n        self.ques_tr = nn.Sequential(nn.Linear(64, 64), nn.Dropout(p=0.5))\n\n        classifier_kwargs = {\n            'input_dim': 64,\n            'hidden_dims': fc_dims,\n            'output_dim': len(vocab['answerTokenToIdx']),\n            'use_batchnorm': fc_use_batchnorm,\n            'dropout': fc_dropout,\n            'add_sigmoid': 0\n        }\n        self.classifier = build_mlp(**classifier_kwargs)\n\n        self.att = nn.Sequential(\n            nn.Tanh(), nn.Dropout(p=0.5), nn.Linear(128, 1))\n\n    def forward(self, images, questions):\n\n        N, T, _, _, _ = images.size()\n\n        # bs x 5 x 3 x 224 x 224\n        img_feats = self.cnn(images.contiguous().view(\n            -1, images.size(2), images.size(3), images.size(4)))\n        img_feats = self.cnn_fc_layer(img_feats)\n\n        img_feats_tr = self.img_tr(img_feats)\n\n        ques_feats = self.q_rnn(questions)\n        ques_feats_repl = ques_feats.view(N, 1, -1).repeat(1, T, 1)\n        ques_feats_repl = ques_feats_repl.view(N * T, -1)\n\n        ques_feats_tr = self.ques_tr(ques_feats_repl)\n\n        
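# score each frame by attending over the fused question-image features, then pool image features with the attention weights\n        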
ques_img_feats = torch.cat([ques_feats_tr, img_feats_tr], 1)\n\n        att_feats = self.att(ques_img_feats)\n        att_probs = F.softmax(att_feats.view(N, T), dim=1)\n        att_probs2 = att_probs.view(N, T, 1).repeat(1, 1, 64)\n\n        att_img_feats = torch.mul(att_probs2, img_feats.view(N, T, 64))\n        att_img_feats = torch.sum(att_img_feats, dim=1)\n\n        mul_feats = torch.mul(ques_feats, att_img_feats)\n\n        scores = self.classifier(mul_feats)\n\n        return scores, att_probs\n\n\n# ----------- Nav -----------\n\n\nclass NavCnnModel(nn.Module):\n    def __init__(self,\n                 num_frames=5,\n                 num_actions=4,\n                 question_input=False,\n                 question_vocab=False,\n                 question_wordvec_dim=64,\n                 question_hidden_dim=64,\n                 question_num_layers=2,\n                 question_dropout=0.5,\n                 fc_use_batchnorm=False,\n                 fc_dropout=0.5,\n                 fc_dims=(64, )):\n        super(NavCnnModel, self).__init__()\n\n        # cnn_kwargs = {'num_classes': 191, 'pretrained': True}\n        # self.cnn = MultitaskCNN(**cnn_kwargs)\n        self.cnn_fc_layer = nn.Sequential(\n            nn.Linear(32 * 10 * 10, 64), nn.ReLU(), nn.Dropout(p=0.5))\n\n        self.question_input = question_input\n        if self.question_input == True:\n            q_rnn_kwargs = {\n                'token_to_idx': question_vocab['questionTokenToIdx'],\n                'wordvec_dim': question_wordvec_dim,\n                'rnn_dim': question_hidden_dim,\n                'rnn_num_layers': question_num_layers,\n                'rnn_dropout': question_dropout,\n            }\n            self.q_rnn = QuestionLstmEncoder(**q_rnn_kwargs)\n            self.ques_tr = nn.Sequential(\n                nn.Linear(64, 64), nn.ReLU(), nn.Dropout(p=0.5))\n\n        classifier_kwargs = {\n            'input_dim': 64 * num_frames + self.question_input * 64,\n            'hidden_dims': fc_dims,\n            'output_dim': num_actions,\n            'use_batchnorm': fc_use_batchnorm,\n            'dropout': fc_dropout,\n            'add_sigmoid': 0\n        }\n        self.classifier = build_mlp(**classifier_kwargs)\n\n    # batch forward, for supervised learning\n    def forward(self, img_feats, questions=None):\n\n        # bs x 5 x 3200\n        N, T, _ = img_feats.size()\n\n        img_feats = self.cnn_fc_layer(img_feats)\n\n        img_feats = img_feats.view(N, T, -1)\n        img_feats = img_feats.view(N, -1)\n\n        if self.question_input == True:\n            ques_feats = self.q_rnn(questions)\n            ques_feats = self.ques_tr(ques_feats)\n\n            img_feats = torch.cat([ques_feats, img_feats], 1)\n\n        scores = self.classifier(img_feats)\n\n        return scores\n\nclass NavRnnMult(nn.Module):\n    def __init__(self,\n                 image_input=False,\n                 image_feat_dim=128,\n                 question_input=False,\n                 question_embed_dim=128,\n                 action_input=False,\n                 action_embed_dim=32,\n                 num_actions=4,\n                 mode='sl',\n                 rnn_type='LSTM',\n                 rnn_hidden_dim=128,\n                 rnn_num_layers=2,\n                 rnn_dropout=0,\n                 return_states=False):\n        super(NavRnnMult, self).__init__()\n\n        self.image_input = image_input\n        self.image_feat_dim = image_feat_dim\n\n        self.question_input = question_input\n        
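# question features are fused with image features by element-wise multiplication in forward(), so they do not add to the RNN input dim\n        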
self.question_embed_dim = question_embed_dim\n\n        self.action_input = action_input\n        self.action_embed_dim = action_embed_dim\n\n        self.num_actions = num_actions\n\n        self.rnn_type = rnn_type\n        self.rnn_hidden_dim = rnn_hidden_dim\n        self.rnn_num_layers = rnn_num_layers\n\n        self.return_states = return_states\n\n        rnn_input_dim = 0\n        if self.image_input == True:\n            rnn_input_dim += image_feat_dim\n            print('Adding input to %s: image, rnn dim: %d' % (self.rnn_type,\n                                                              rnn_input_dim))\n\n        if self.question_input == True:\n            #rnn_input_dim += question_embed_dim\n            print('Adding input to %s: question, rnn dim: %d' %\n                  (self.rnn_type, rnn_input_dim))\n\n        if self.action_input == True:\n            self.action_embed = nn.Embedding(num_actions, action_embed_dim)\n            rnn_input_dim += action_embed_dim\n            print('Adding input to %s: action, rnn dim: %d' % (self.rnn_type,\n                                                               rnn_input_dim))\n\n        self.rnn = getattr(nn, self.rnn_type)(\n            rnn_input_dim,\n            self.rnn_hidden_dim,\n            self.rnn_num_layers,\n            dropout=rnn_dropout,\n            batch_first=True)\n        print('Building %s with hidden dim: %d' % (self.rnn_type,\n                                                   rnn_hidden_dim))\n\n        self.decoder = nn.Linear(self.rnn_hidden_dim, self.num_actions)\n\n    def init_hidden(self, bsz):\n        weight = next(self.parameters()).data\n        if self.rnn_type == 'LSTM':\n            return (Variable(\n                weight.new(self.rnn_num_layers, bsz, self.rnn_hidden_dim)\n                .zero_()), Variable(\n                    weight.new(self.rnn_num_layers, bsz, self.rnn_hidden_dim)\n                    .zero_()))\n        elif self.rnn_type == 'GRU':\n            return Variable(\n                weight.new(self.rnn_num_layers, bsz, self.rnn_hidden_dim)\n                .zero_())\n\n    def forward(self,\n                img_feats,\n                question_feats,\n                actions_in,\n                action_lengths,\n                hidden=False):\n        input_feats = Variable()\n\n        T = False\n        if self.image_input == True:\n            N, T, _ = img_feats.size()\n            input_feats = img_feats\n\n        if self.question_input == True:\n            N, D = question_feats.size()\n            question_feats = question_feats.view(N, 1, D)\n            if T == False:\n                T = actions_in.size(1)\n            question_feats = question_feats.repeat(1, T, 1)\n            if len(input_feats) == 0:\n                input_feats = question_feats\n            else:\n                #input_feats = torch.cat([input_feats, question_feats], 2)\n                input_feats = torch.mul(input_feats, question_feats)\n\n        if self.action_input == True:\n            if len(input_feats) == 0:\n                input_feats = self.action_embed(actions_in)\n            else:\n                input_feats = torch.cat(\n                    [input_feats, self.action_embed(actions_in)], 2)\n\n        packed_input_feats = pack_padded_sequence(\n            input_feats, action_lengths, batch_first=True)\n        packed_output, hidden = self.rnn(packed_input_feats)\n        rnn_output, _ = pad_packed_sequence(packed_output, batch_first=True)\n        output = 
self.decoder(rnn_output.contiguous().view(\n            rnn_output.size(0) * rnn_output.size(1), rnn_output.size(2)))\n\n        if self.return_states == True:\n            return rnn_output, output, hidden\n        else:\n            return output, hidden\n\n    def step_forward(self, img_feats, question_feats, actions_in, hidden):\n        input_feats = Variable()\n\n        T = False\n        if self.image_input == True:\n            N, T, _ = img_feats.size()\n            input_feats = img_feats\n\n        if self.question_input == True:\n            N, D = question_feats.size()\n            question_feats = question_feats.view(N, 1, D)\n            if T == False:\n                T = actions_in.size(1)\n            question_feats = question_feats.repeat(1, T, 1)\n            if len(input_feats) == 0:\n                input_feats = question_feats\n            else:\n                #input_feats = torch.cat([input_feats, question_feats], 2)\n                input_feats = torch.mul(input_feats, question_feats)\n\n        if self.action_input == True:\n            if len(input_feats) == 0:\n                input_feats = self.action_embed(actions_in)\n            else:\n                input_feats = torch.cat(\n                    [input_feats, self.action_embed(actions_in)], 2)\n\n        output, hidden = self.rnn(input_feats, hidden)\n\n        output = self.decoder(output.contiguous().view(\n            output.size(0) * output.size(1), output.size(2)))\n\n        return output, hidden\n\n\nclass NavRnn(nn.Module):\n    def __init__(self,\n                 image_input=False,\n                 image_feat_dim=128,\n                 question_input=False,\n                 question_embed_dim=128,\n                 action_input=False,\n                 action_embed_dim=32,\n                 num_actions=4,\n                 mode='sl',\n                 rnn_type='LSTM',\n                 rnn_hidden_dim=128,\n                 rnn_num_layers=2,\n                 rnn_dropout=0,\n                 return_states=False):\n        super(NavRnn, self).__init__()\n\n        self.image_input = image_input\n        self.image_feat_dim = image_feat_dim\n\n        self.question_input = question_input\n        self.question_embed_dim = question_embed_dim\n\n        self.action_input = action_input\n        self.action_embed_dim = action_embed_dim\n\n        self.num_actions = num_actions\n\n        self.rnn_type = rnn_type\n        self.rnn_hidden_dim = rnn_hidden_dim\n        self.rnn_num_layers = rnn_num_layers\n\n        self.return_states = return_states\n\n        rnn_input_dim = 0\n        if self.image_input == True:\n            rnn_input_dim += image_feat_dim\n            print('Adding input to %s: image, rnn dim: %d' % (self.rnn_type,\n                                                              rnn_input_dim))\n\n        if self.question_input == True:\n            rnn_input_dim += question_embed_dim\n            print('Adding input to %s: question, rnn dim: %d' %\n                  (self.rnn_type, rnn_input_dim))\n\n        if self.action_input == True:\n            self.action_embed = nn.Embedding(num_actions, action_embed_dim)\n            rnn_input_dim += action_embed_dim\n            print('Adding input to %s: action, rnn dim: %d' % (self.rnn_type,\n                                                               rnn_input_dim))\n\n        self.rnn = getattr(nn, self.rnn_type)(\n            rnn_input_dim,\n            self.rnn_hidden_dim,\n            self.rnn_num_layers,\n            
dropout=rnn_dropout,\n            batch_first=True)\n        print('Building %s with hidden dim: %d' % (self.rnn_type,\n                                                   rnn_hidden_dim))\n\n        self.decoder = nn.Linear(self.rnn_hidden_dim, self.num_actions)\n\n    def init_hidden(self, bsz):\n        weight = next(self.parameters()).data\n        if self.rnn_type == 'LSTM':\n            return (Variable(\n                weight.new(self.rnn_num_layers, bsz, self.rnn_hidden_dim)\n                .zero_()), Variable(\n                    weight.new(self.rnn_num_layers, bsz, self.rnn_hidden_dim)\n                    .zero_()))\n        elif self.rnn_type == 'GRU':\n            return Variable(\n                weight.new(self.rnn_num_layers, bsz, self.rnn_hidden_dim)\n                .zero_())\n\n    def forward(self,\n                img_feats,\n                question_feats,\n                actions_in,\n                action_lengths,\n                hidden=False):\n        input_feats = Variable()\n\n        T = False\n        if self.image_input == True:\n            N, T, _ = img_feats.size()\n            input_feats = img_feats\n\n        if self.question_input == True:\n            N, D = question_feats.size()\n            question_feats = question_feats.view(N, 1, D)\n            if T == False:\n                T = actions_in.size(1)\n            question_feats = question_feats.repeat(1, T, 1)\n            if len(input_feats) == 0:\n                input_feats = question_feats\n            else:\n                input_feats = torch.cat([input_feats, question_feats], 2)\n\n        if self.action_input == True:\n            if len(input_feats) == 0:\n                input_feats = self.action_embed(actions_in)\n            else:\n                input_feats = torch.cat(\n                    [input_feats, self.action_embed(actions_in)], 2)\n\n        packed_input_feats = pack_padded_sequence(\n            input_feats, action_lengths, batch_first=True)\n        packed_output, hidden = self.rnn(packed_input_feats)\n        rnn_output, _ = pad_packed_sequence(packed_output, batch_first=True)\n        output = self.decoder(rnn_output.contiguous().view(\n            rnn_output.size(0) * rnn_output.size(1), rnn_output.size(2)))\n\n        if self.return_states == True:\n            return rnn_output, output, hidden\n        else:\n            return output, hidden\n\n    def step_forward(self, img_feats, question_feats, actions_in, hidden):\n        input_feats = Variable()\n\n        T = False\n        if self.image_input == True:\n            N, T, _ = img_feats.size()\n            input_feats = img_feats\n\n        if self.question_input == True:\n            N, D = question_feats.size()\n            question_feats = question_feats.view(N, 1, D)\n            if T == False:\n                T = actions_in.size(1)\n            question_feats = question_feats.repeat(1, T, 1)\n            if len(input_feats) == 0:\n                input_feats = question_feats\n            else:\n                input_feats = torch.cat([input_feats, question_feats], 2)\n\n        if self.action_input == True:\n            if len(input_feats) == 0:\n                input_feats = self.action_embed(actions_in)\n            else:\n                input_feats = torch.cat(\n                    [input_feats, self.action_embed(actions_in)], 2)\n\n        output, hidden = self.rnn(input_feats, hidden)\n\n        output = self.decoder(output.contiguous().view(\n            output.size(0) * output.size(1), 
output.size(2)))\n\n        return output, hidden\n\nclass NavCnnRnnMultModel(nn.Module):\n    def __init__(\n            self,\n            num_output=4,  # forward, left, right, stop\n            rnn_image_input=True,\n            rnn_image_feat_dim=128,\n            question_input=False,\n            question_vocab=False,\n            question_wordvec_dim=64,\n            question_hidden_dim=64,\n            question_num_layers=2,\n            question_dropout=0.5,\n            rnn_question_embed_dim=128,\n            rnn_action_input=True,\n            rnn_action_embed_dim=32,\n            rnn_type='LSTM',\n            rnn_hidden_dim=1024,\n            rnn_num_layers=1,\n            rnn_dropout=0):\n        super(NavCnnRnnMultModel, self).__init__()\n\n        self.cnn_fc_layer = nn.Sequential(\n            nn.Linear(32 * 10 * 10, rnn_image_feat_dim),\n            nn.ReLU(),\n            nn.Dropout(p=0.5))\n\n        self.rnn_hidden_dim = rnn_hidden_dim\n\n        self.question_input = question_input\n        if self.question_input == True:\n            q_rnn_kwargs = {\n                'token_to_idx': question_vocab['questionTokenToIdx'],\n                'wordvec_dim': question_wordvec_dim,\n                'rnn_dim': question_hidden_dim,\n                'rnn_num_layers': question_num_layers,\n                'rnn_dropout': question_dropout,\n            }\n            self.q_rnn = QuestionLstmEncoder(**q_rnn_kwargs)\n            self.ques_tr = nn.Sequential(\n                nn.Linear(64, rnn_image_feat_dim), nn.ReLU(), nn.Dropout(p=0.5))\n\n        self.nav_rnn = NavRnnMult(\n            image_input=rnn_image_input,\n            image_feat_dim=rnn_image_feat_dim,\n            question_input=question_input,\n            question_embed_dim=question_hidden_dim,\n            action_input=rnn_action_input,\n            action_embed_dim=rnn_action_embed_dim,\n            num_actions=num_output,\n            rnn_type=rnn_type,\n            rnn_hidden_dim=rnn_hidden_dim,\n            rnn_num_layers=rnn_num_layers,\n            rnn_dropout=rnn_dropout)\n\n    def forward(self,\n                img_feats,\n                questions,\n                actions_in,\n                action_lengths,\n                hidden=False,\n                step=False):\n        N, T, _ = img_feats.size()\n\n        # B x T x 128\n        img_feats = self.cnn_fc_layer(img_feats)\n\n        if self.question_input == True:\n            ques_feats = self.q_rnn(questions)\n            ques_feats = self.ques_tr(ques_feats)\n\n            if step == True:\n                output, hidden = self.nav_rnn.step_forward(\n                    img_feats, ques_feats, actions_in, hidden)\n            else:\n                output, hidden = self.nav_rnn(img_feats, ques_feats,\n                                              actions_in, action_lengths)\n        else:\n            if step == True:\n                output, hidden = self.nav_rnn.step_forward(\n                    img_feats, False, actions_in, hidden)\n            else:\n                output, hidden = self.nav_rnn(img_feats, False, actions_in,\n                                              action_lengths)\n\n        return output, hidden\n\n\nclass NavCnnRnnModel(nn.Module):\n    def __init__(\n            self,\n            num_output=4,  # forward, left, right, stop\n            rnn_image_input=True,\n            rnn_image_feat_dim=128,\n            question_input=False,\n            question_vocab=False,\n            question_wordvec_dim=64,\n            
question_hidden_dim=64,\n            question_num_layers=2,\n            question_dropout=0.5,\n            rnn_question_embed_dim=128,\n            rnn_action_input=True,\n            rnn_action_embed_dim=32,\n            rnn_type='LSTM',\n            rnn_hidden_dim=1024,\n            rnn_num_layers=1,\n            rnn_dropout=0):\n        super(NavCnnRnnModel, self).__init__()\n\n        self.cnn_fc_layer = nn.Sequential(\n            nn.Linear(32 * 10 * 10, rnn_image_feat_dim),\n            nn.ReLU(),\n            nn.Dropout(p=0.5))\n\n        self.rnn_hidden_dim = rnn_hidden_dim\n\n        self.question_input = question_input\n        if self.question_input == True:\n            q_rnn_kwargs = {\n                'token_to_idx': question_vocab['questionTokenToIdx'],\n                'wordvec_dim': question_wordvec_dim,\n                'rnn_dim': question_hidden_dim,\n                'rnn_num_layers': question_num_layers,\n                'rnn_dropout': question_dropout,\n            }\n            self.q_rnn = QuestionLstmEncoder(**q_rnn_kwargs)\n            self.ques_tr = nn.Sequential(\n                nn.Linear(64, 64), nn.ReLU(), nn.Dropout(p=0.5))\n\n        self.nav_rnn = NavRnn(\n            image_input=rnn_image_input,\n            image_feat_dim=rnn_image_feat_dim,\n            question_input=question_input,\n            question_embed_dim=question_hidden_dim,\n            action_input=rnn_action_input,\n            action_embed_dim=rnn_action_embed_dim,\n            num_actions=num_output,\n            rnn_type=rnn_type,\n            rnn_hidden_dim=rnn_hidden_dim,\n            rnn_num_layers=rnn_num_layers,\n            rnn_dropout=rnn_dropout)\n\n    def forward(self,\n                img_feats,\n                questions,\n                actions_in,\n                action_lengths,\n                hidden=False,\n                step=False):\n        N, T, _ = img_feats.size()\n\n        # B x T x 128\n        img_feats = self.cnn_fc_layer(img_feats)\n\n        if self.question_input == True:\n            ques_feats = self.q_rnn(questions)\n            ques_feats = self.ques_tr(ques_feats)\n\n            if step == True:\n                output, hidden = self.nav_rnn.step_forward(\n                    img_feats, ques_feats, actions_in, hidden)\n            else:\n                output, hidden = self.nav_rnn(img_feats, ques_feats,\n                                              actions_in, action_lengths)\n        else:\n            if step == True:\n                output, hidden = self.nav_rnn.step_forward(\n                    img_feats, False, actions_in, hidden)\n            else:\n                output, hidden = self.nav_rnn(img_feats, False, actions_in,\n                                              action_lengths)\n\n        return output, hidden\n\n\nclass NavPlannerControllerModel(nn.Module):\n    def __init__(self,\n                 question_vocab,\n                 num_output=4,\n                 question_wordvec_dim=64,\n                 question_hidden_dim=64,\n                 question_num_layers=2,\n                 question_dropout=0.5,\n                 planner_rnn_image_feat_dim=128,\n                 planner_rnn_action_embed_dim=32,\n                 planner_rnn_type='GRU',\n                 planner_rnn_hidden_dim=1024,\n                 planner_rnn_num_layers=1,\n                 planner_rnn_dropout=0,\n                 controller_fc_dims=(256, )):\n        super(NavPlannerControllerModel, self).__init__()\n\n        self.cnn_fc_layer = 
nn.Sequential(\n            nn.Linear(32 * 10 * 10, planner_rnn_image_feat_dim),\n            nn.ReLU(),\n            nn.Dropout(p=0.5))\n\n        q_rnn_kwargs = {\n            'token_to_idx': question_vocab['questionTokenToIdx'],\n            'wordvec_dim': question_wordvec_dim,\n            'rnn_dim': question_hidden_dim,\n            'rnn_num_layers': question_num_layers,\n            'rnn_dropout': question_dropout,\n        }\n        self.q_rnn = QuestionLstmEncoder(**q_rnn_kwargs)\n        self.ques_tr = nn.Sequential(\n            nn.Linear(question_hidden_dim, question_hidden_dim),\n            nn.ReLU(),\n            nn.Dropout(p=0.5))\n\n        self.planner_nav_rnn = NavRnn(\n            image_input=True,\n            image_feat_dim=planner_rnn_image_feat_dim,\n            question_input=True,\n            question_embed_dim=question_hidden_dim,\n            action_input=True,\n            action_embed_dim=planner_rnn_action_embed_dim,\n            num_actions=num_output,\n            rnn_type=planner_rnn_type,\n            rnn_hidden_dim=planner_rnn_hidden_dim,\n            rnn_num_layers=planner_rnn_num_layers,\n            rnn_dropout=planner_rnn_dropout,\n            return_states=True)\n\n        controller_kwargs = {\n            'input_dim':\n            planner_rnn_image_feat_dim + planner_rnn_action_embed_dim +\n            planner_rnn_hidden_dim,\n            'hidden_dims':\n            controller_fc_dims,\n            'output_dim':\n            2,\n            'add_sigmoid':\n            0\n        }\n        self.controller = build_mlp(**controller_kwargs)\n\n    def forward(self,\n                questions,\n                planner_img_feats,\n                planner_actions_in,\n                planner_action_lengths,\n                planner_hidden_index,\n                controller_img_feats,\n                controller_actions_in,\n                controller_action_lengths,\n                planner_hidden=False):\n\n        # ts = time.time()\n        N_p, T_p, _ = planner_img_feats.size()\n\n        planner_img_feats = self.cnn_fc_layer(planner_img_feats)\n        controller_img_feats = self.cnn_fc_layer(controller_img_feats)\n\n        ques_feats = self.q_rnn(questions)\n        ques_feats = self.ques_tr(ques_feats)\n\n        planner_states, planner_scores, planner_hidden = self.planner_nav_rnn(\n            planner_img_feats, ques_feats, planner_actions_in,\n            planner_action_lengths)\n\n        planner_hidden_index = planner_hidden_index[:, :\n                                                    controller_action_lengths.\n                                                    max()]\n        controller_img_feats = controller_img_feats[:, :\n                                                    controller_action_lengths.\n                                                    max()]\n        controller_actions_in = controller_actions_in[:, :\n                                                      controller_action_lengths.\n                                                      max()]\n\n        N_c, T_c, _ = controller_img_feats.size()\n\n        assert planner_hidden_index.max().data[0] < planner_states.size(1)\n\n        planner_hidden_index = planner_hidden_index.contiguous().view(\n            N_p, planner_hidden_index.size(1), 1).repeat(\n                1, 1, planner_states.size(2))\n\n        controller_hidden_in = planner_states.gather(1, planner_hidden_index)\n        controller_hidden_in = controller_hidden_in.view(\n            N_c * T_c, 
controller_hidden_in.size(2))\n\n        controller_img_feats = controller_img_feats.contiguous().view(\n            N_c * T_c, -1)\n        controller_actions_embed = self.planner_nav_rnn.action_embed(\n            controller_actions_in).view(N_c * T_c, -1)\n\n        controller_in = torch.cat([\n            controller_img_feats, controller_actions_embed,\n            controller_hidden_in\n        ], 1)\n        controller_scores = self.controller(controller_in)\n\n        return planner_scores, controller_scores, planner_hidden\n\n    def planner_step(self, questions, img_feats, actions_in, planner_hidden):\n\n        img_feats = self.cnn_fc_layer(img_feats)\n        ques_feats = self.q_rnn(questions)\n        ques_feats = self.ques_tr(ques_feats)\n        planner_scores, planner_hidden = self.planner_nav_rnn.step_forward(\n            img_feats, ques_feats, actions_in, planner_hidden)\n\n        return planner_scores, planner_hidden\n\n    def controller_step(self, img_feats, actions_in, hidden_in):\n\n        img_feats = self.cnn_fc_layer(img_feats)\n        actions_embed = self.planner_nav_rnn.action_embed(actions_in)\n\n        img_feats = img_feats.view(1, -1)\n        actions_embed = actions_embed.view(1, -1)\n        hidden_in = hidden_in.view(1, -1)\n\n        controller_in = torch.cat([img_feats, actions_embed, hidden_in], 1)\n        controller_scores = self.controller(controller_in)\n\n        return controller_scores\n"
  },
  {
    "path": "training/train_eqa.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates.\n# All rights reserved.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport h5py\nimport time\nimport argparse\nimport numpy as np\nimport os, sys, json\nfrom tqdm import tqdm\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\ntorch.backends.cudnn.enabled = False\nimport torch.multiprocessing as mp\n\nfrom models import NavCnnModel, NavCnnRnnModel, NavPlannerControllerModel, VqaLstmCnnAttentionModel\nfrom data import EqaDataset, EqaDataLoader\nfrom metrics import NavMetric, VqaMetric\n\nfrom models import MaskedNLLCriterion\n\nfrom models import get_state, repackage_hidden, ensure_shared_grads\nfrom data import load_vocab, flat_to_hierarchical_actions\n\ndef eval(rank, args, shared_nav_model, shared_ans_model):\n\n    torch.cuda.set_device(args.gpus.index(args.gpus[rank % len(args.gpus)]))\n\n    if args.model_type == 'pacman':\n\n        model_kwargs = {'question_vocab': load_vocab(args.vocab_json)}\n        nav_model = NavPlannerControllerModel(**model_kwargs)\n\n    else:\n\n        exit()\n\n    model_kwargs = {'vocab': load_vocab(args.vocab_json)}\n    ans_model = VqaLstmCnnAttentionModel(**model_kwargs)\n\n    eval_loader_kwargs = {\n        'questions_h5': getattr(args, args.eval_split + '_h5'),\n        'data_json': args.data_json,\n        'vocab': args.vocab_json,\n        'target_obj_conn_map_dir': args.target_obj_conn_map_dir,\n        'map_resolution': args.map_resolution,\n        'batch_size': 1,\n        'input_type': args.model_type,\n        'num_frames': 5,\n        'split': args.eval_split,\n        'max_threads_per_gpu': args.max_threads_per_gpu,\n        'gpu_id': args.gpus[rank % len(args.gpus)],\n        'to_cache': False,\n        'max_controller_actions': args.max_controller_actions,\n        'max_actions': args.max_actions\n    }\n\n    eval_loader = EqaDataLoader(**eval_loader_kwargs)\n    print('eval_loader has %d samples' % len(eval_loader.dataset))\n\n    args.output_nav_log_path = os.path.join(args.log_dir,\n                                            'nav_eval_' + str(rank) + '.json')\n    args.output_ans_log_path = os.path.join(args.log_dir,\n                                            'ans_eval_' + str(rank) + '.json')\n\n    t, epoch, best_eval_acc = 0, 0, 0.0\n\n    while epoch < int(args.max_epochs):\n\n        start_time = time.time()\n        invalids = []\n\n        nav_model.load_state_dict(shared_nav_model.state_dict())\n        nav_model.eval()\n\n        ans_model.load_state_dict(shared_ans_model.state_dict())\n        ans_model.eval()\n        ans_model.cuda()\n\n        # that's a lot of numbers\n        nav_metrics = NavMetric(\n            info={'split': args.eval_split,\n                  'thread': rank},\n            metric_names=[\n                'd_0_10', 'd_0_30', 'd_0_50', 'd_T_10', 'd_T_30', 'd_T_50',\n                'd_D_10', 'd_D_30', 'd_D_50', 'd_min_10', 'd_min_30',\n                'd_min_50', 'r_T_10', 'r_T_30', 'r_T_50', 'r_e_10', 'r_e_30',\n                'r_e_50', 'stop_10', 'stop_30', 'stop_50', 'ep_len_10',\n                'ep_len_30', 'ep_len_50'\n            ],\n            log_json=args.output_nav_log_path)\n\n        vqa_metrics = VqaMetric(\n            info={'split': args.eval_split,\n                  'thread': rank},\n            metric_names=[\n                'accuracy_10', 'accuracy_30', 'accuracy_50', 
'mean_rank_10',\n                'mean_rank_30', 'mean_rank_50', 'mean_reciprocal_rank_10',\n                'mean_reciprocal_rank_30', 'mean_reciprocal_rank_50'\n            ],\n            log_json=args.output_ans_log_path)\n\n        if 'pacman' in args.model_type:\n\n            done = False\n\n            while done == False:\n\n                for batch in tqdm(eval_loader):\n\n                    nav_model.load_state_dict(shared_nav_model.state_dict())\n                    nav_model.eval()\n                    nav_model.cuda()\n\n                    idx, question, answer, actions, action_length = batch\n                    metrics_slug = {}\n\n                    h3d = eval_loader.dataset.episode_house\n\n                    # evaluate at multiple initializations\n                    for i in [10, 30, 50]:\n\n                        t += 1\n\n                        if i > action_length[0]:\n                            invalids.append([idx[0], i])\n                            continue\n\n                        question_var = Variable(question.cuda())\n\n                        controller_step = False\n                        planner_hidden = nav_model.planner_nav_rnn.init_hidden(\n                            1)\n\n                        # forward through planner till spawn\n                        (\n                            planner_actions_in, planner_img_feats,\n                            controller_step, controller_action_in,\n                            controller_img_feat, init_pos,\n                            controller_action_counter\n                        ) = eval_loader.dataset.get_hierarchical_features_till_spawn(\n                            actions[0, :action_length[0] + 1].numpy(), i, args.max_controller_actions\n                        )\n\n                        planner_actions_in_var = Variable(\n                            planner_actions_in.cuda())\n                        planner_img_feats_var = Variable(\n                            planner_img_feats.cuda())\n\n                        for step in range(planner_actions_in.size(0)):\n\n                            planner_scores, planner_hidden = nav_model.planner_step(\n                                question_var, planner_img_feats_var[step].view(\n                                    1, 1,\n                                    3200), planner_actions_in_var[step].view(\n                                        1, 1), planner_hidden)\n\n                        if controller_step == True:\n\n                            controller_img_feat_var = Variable(\n                                controller_img_feat.cuda())\n                            controller_action_in_var = Variable(\n                                torch.LongTensor(1, 1).fill_(\n                                    int(controller_action_in)).cuda())\n\n                            controller_scores = nav_model.controller_step(\n                                controller_img_feat_var.view(1, 1, 3200),\n                                controller_action_in_var.view(1, 1),\n                                planner_hidden[0])\n\n                            prob = F.softmax(controller_scores, dim=1)\n                            controller_action = int(\n                                prob.max(1)[1].data.cpu().numpy()[0])\n\n                            if controller_action == 1:\n                                controller_step = True\n                            else:\n                                controller_step = False\n\n                            
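# while the controller keeps control, the previously executed action is repeated\n                            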
action = int(controller_action_in)\n                            action_in = torch.LongTensor(\n                                1, 1).fill_(action + 1).cuda()\n\n                        else:\n\n                            prob = F.softmax(planner_scores, dim=1)\n                            action = int(prob.max(1)[1].data.cpu().numpy()[0])\n\n                            action_in = torch.LongTensor(\n                                1, 1).fill_(action + 1).cuda()\n\n                        h3d.env.reset(\n                            x=init_pos[0], y=init_pos[2], yaw=init_pos[3])\n\n                        init_dist_to_target = h3d.get_dist_to_target(\n                            h3d.env.cam.pos)\n                        if init_dist_to_target < 0:  # unreachable\n                            invalids.append([idx[0], i])\n                            continue\n\n                        episode_length = 0\n                        episode_done = True\n                        controller_action_counter = 0\n\n                        dists_to_target, pos_queue, pred_actions = [\n                            init_dist_to_target\n                        ], [init_pos], []\n                        planner_actions, controller_actions = [], []\n\n                        if action != 3:\n\n                            # take the first step\n                            img, _, _ = h3d.step(action)\n                            img = torch.from_numpy(img.transpose(\n                                2, 0, 1)).float() / 255.0\n                            img_feat_var = eval_loader.dataset.cnn(\n                                Variable(img.view(1, 3, 224,\n                                                  224).cuda())).view(\n                                                      1, 1, 3200)\n\n                            for step in range(args.max_episode_length):\n\n                                episode_length += 1\n\n                                if controller_step == False:\n                                    planner_scores, planner_hidden = nav_model.planner_step(\n                                        question_var, img_feat_var,\n                                        Variable(action_in), planner_hidden)\n\n                                    prob = F.softmax(planner_scores, dim=1)\n                                    action = int(\n                                        prob.max(1)[1].data.cpu().numpy()[0])\n                                    planner_actions.append(action)\n\n                                pred_actions.append(action)\n                                img, _, episode_done = h3d.step(action)\n\n                                episode_done = episode_done or episode_length >= args.max_episode_length\n\n                                img = torch.from_numpy(img.transpose(\n                                    2, 0, 1)).float() / 255.0\n                                img_feat_var = eval_loader.dataset.cnn(\n                                    Variable(img.view(1, 3, 224, 224)\n                                             .cuda())).view(1, 1, 3200)\n\n                                dists_to_target.append(\n                                    h3d.get_dist_to_target(h3d.env.cam.pos))\n                                pos_queue.append([\n                                    h3d.env.cam.pos.x, h3d.env.cam.pos.y,\n                                    h3d.env.cam.pos.z, h3d.env.cam.yaw\n                                ])\n\n                                if episode_done == True:\n                
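                    # end the step loop once the episode terminates or hits max_episode_length\n                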
                    break\n\n                                # query controller to continue or not\n                                controller_action_in = Variable(\n                                    torch.LongTensor(1,\n                                                     1).fill_(action).cuda())\n                                controller_scores = nav_model.controller_step(\n                                    img_feat_var, controller_action_in,\n                                    planner_hidden[0])\n\n                                prob = F.softmax(controller_scores, dim=1)\n                                controller_action = int(\n                                    prob.max(1)[1].data.cpu().numpy()[0])\n\n                                if controller_action == 1 and controller_action_counter < 4:\n                                    controller_action_counter += 1\n                                    controller_step = True\n                                else:\n                                    controller_action_counter = 0\n                                    controller_step = False\n                                    controller_action = 0\n\n                                controller_actions.append(controller_action)\n\n                                action_in = torch.LongTensor(\n                                    1, 1).fill_(action + 1).cuda()\n\n                        # run answerer here\n                        if len(pos_queue) < 5:\n                            pos_queue = eval_loader.dataset.episode_pos_queue[len(\n                                pos_queue) - 5:] + pos_queue\n                        images = eval_loader.dataset.get_frames(\n                            h3d, pos_queue[-5:], preprocess=True)\n                        images_var = Variable(\n                            torch.from_numpy(images).cuda()).view(\n                                1, 5, 3, 224, 224)\n                        scores, att_probs = ans_model(images_var, question_var)\n                        ans_acc, ans_rank = vqa_metrics.compute_ranks(\n                            scores.data.cpu(), answer)\n\n                        pred_answer = scores.max(1)[1].data[0]\n\n                        print('[Q_GT]', ' '.join([\n                            eval_loader.dataset.vocab['questionIdxToToken'][x]\n                            for x in question[0] if x != 0\n                        ]))\n                        print('[A_GT]', eval_loader.dataset.vocab[\n                            'answerIdxToToken'][answer[0]])\n                        print('[A_PRED]', eval_loader.dataset.vocab[\n                            'answerIdxToToken'][pred_answer])\n\n                        # compute stats\n                        metrics_slug['accuracy_' + str(i)] = ans_acc[0]\n                        metrics_slug['mean_rank_' + str(i)] = ans_rank[0]\n                        metrics_slug['mean_reciprocal_rank_'\n                                     + str(i)] = 1.0 / ans_rank[0]\n\n                        metrics_slug['d_0_' + str(i)] = dists_to_target[0]\n                        metrics_slug['d_T_' + str(i)] = dists_to_target[-1]\n                        metrics_slug['d_D_' + str(\n                            i)] = dists_to_target[0] - dists_to_target[-1]\n                        metrics_slug['d_min_' + str(i)] = np.array(\n                            dists_to_target).min()\n                        metrics_slug['ep_len_' + str(i)] = episode_length\n                        if action == 3:\n                           
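 # action index 3 is the 'stop' action\n                           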
 metrics_slug['stop_' + str(i)] = 1\n                        else:\n                            metrics_slug['stop_' + str(i)] = 0\n                        inside_room = []\n                        for p in pos_queue:\n                            inside_room.append(\n                                h3d.is_inside_room(\n                                    p, eval_loader.dataset.target_room))\n                        if inside_room[-1] == True:\n                            metrics_slug['r_T_' + str(i)] = 1\n                        else:\n                            metrics_slug['r_T_' + str(i)] = 0\n                        if any([x == True for x in inside_room]) == True:\n                            metrics_slug['r_e_' + str(i)] = 1\n                        else:\n                            metrics_slug['r_e_' + str(i)] = 0\n\n                    # navigation metrics\n                    metrics_list = []\n                    for i in nav_metrics.metric_names:\n                        if i not in metrics_slug:\n                            metrics_list.append(nav_metrics.metrics[\n                                nav_metrics.metric_names.index(i)][0])\n                        else:\n                            metrics_list.append(metrics_slug[i])\n\n                    nav_metrics.update(metrics_list)\n\n                    # vqa metrics\n                    metrics_list = []\n                    for i in vqa_metrics.metric_names:\n                        if i not in metrics_slug:\n                            metrics_list.append(vqa_metrics.metrics[\n                                vqa_metrics.metric_names.index(i)][0])\n                        else:\n                            metrics_list.append(metrics_slug[i])\n\n                    vqa_metrics.update(metrics_list)\n\n                try:\n                    print(nav_metrics.get_stat_string(mode=0))\n                    print(vqa_metrics.get_stat_string(mode=0))\n                except:\n                    pass\n\n                print('epoch', epoch)\n                print('invalids', len(invalids))\n\n                eval_loader.dataset._load_envs()\n                if len(eval_loader.dataset.pruned_env_set) == 0:\n                    done = True\n\n        epoch += 1\n\n        # checkpoint if best val accuracy\n        if vqa_metrics.metrics[2][0] > best_eval_acc:  # ans_acc_50\n            best_eval_acc = vqa_metrics.metrics[2][0]\n            if epoch % args.eval_every == 0 and args.log == True:\n                vqa_metrics.dump_log()\n                nav_metrics.dump_log()\n\n                model_state = get_state(nav_model)\n\n                aad = dict(args.__dict__)\n                ad = {}\n                for i in aad:\n                    if i[0] != '_':\n                        ad[i] = aad[i]\n\n                checkpoint = {'args': ad, 'state': model_state, 'epoch': epoch}\n\n                checkpoint_path = '%s/epoch_%d_ans_50_%.04f.pt' % (\n                    args.checkpoint_dir, epoch, best_eval_acc)\n                print('Saving checkpoint to %s' % checkpoint_path)\n                torch.save(checkpoint, checkpoint_path)\n\n        print('[best_eval_ans_acc_50:%.04f]' % best_eval_acc)\n\n        eval_loader.dataset._load_envs(start_idx=0, in_order=True)\n\n\ndef train(rank, args, shared_nav_model, shared_ans_model):\n\n    torch.cuda.set_device(args.gpus.index(args.gpus[rank % len(args.gpus)]))\n\n    if args.model_type == 'pacman':\n\n        model_kwargs = {'question_vocab': load_vocab(args.vocab_json)}\n     
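   # 'pacman' -> planner-controller navigation model\n     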
   nav_model = NavPlannerControllerModel(**model_kwargs)\n\n    else:\n\n        exit()\n\n    model_kwargs = {'vocab': load_vocab(args.vocab_json)}\n    ans_model = VqaLstmCnnAttentionModel(**model_kwargs)\n\n    optim = torch.optim.SGD(\n        filter(lambda p: p.requires_grad, shared_nav_model.parameters()),\n        lr=args.learning_rate)\n\n    train_loader_kwargs = {\n        'questions_h5': args.train_h5,\n        'data_json': args.data_json,\n        'vocab': args.vocab_json,\n        'target_obj_conn_map_dir': args.target_obj_conn_map_dir,\n        'map_resolution': args.map_resolution,\n        'batch_size': 1,\n        'input_type': args.model_type,\n        'num_frames': 5,\n        'split': 'train',\n        'max_threads_per_gpu': args.max_threads_per_gpu,\n        'gpu_id': args.gpus[rank % len(args.gpus)],\n        'to_cache': args.cache,\n        'max_controller_actions': args.max_controller_actions,\n        'max_actions': args.max_actions\n    }\n\n    args.output_nav_log_path = os.path.join(args.log_dir,\n                                            'nav_train_' + str(rank) + '.json')\n    args.output_ans_log_path = os.path.join(args.log_dir,\n                                            'ans_train_' + str(rank) + '.json')\n\n    nav_model.load_state_dict(shared_nav_model.state_dict())\n    nav_model.cuda()\n\n    ans_model.load_state_dict(shared_ans_model.state_dict())\n    ans_model.eval()\n    ans_model.cuda()\n\n    nav_metrics = NavMetric(\n        info={'split': 'train',\n              'thread': rank},\n        metric_names=[\n            'planner_loss', 'controller_loss', 'reward', 'episode_length'\n        ],\n        log_json=args.output_nav_log_path)\n\n    vqa_metrics = VqaMetric(\n        info={'split': 'train',\n              'thread': rank},\n        metric_names=['accuracy', 'mean_rank', 'mean_reciprocal_rank'],\n        log_json=args.output_ans_log_path)\n\n    train_loader = EqaDataLoader(**train_loader_kwargs)\n\n    print('train_loader has %d samples' % len(train_loader.dataset))\n\n    t, epoch = 0, 0\n    p_losses, c_losses, reward_list, episode_length_list = [], [], [], []\n\n    nav_metrics.update([10.0, 10.0, 0, 100])\n\n    mult = 0.1\n\n    while epoch < int(args.max_epochs):\n\n        if 'pacman' in args.model_type:\n\n            planner_lossFn = MaskedNLLCriterion().cuda()\n            controller_lossFn = MaskedNLLCriterion().cuda()\n\n            done = False\n            all_envs_loaded = train_loader.dataset._check_if_all_envs_loaded()\n\n            while done == False:\n\n                for batch in train_loader:\n\n                    nav_model.load_state_dict(shared_nav_model.state_dict())\n                    nav_model.eval()\n                    nav_model.cuda()\n\n                    idx, question, answer, actions, action_length = batch\n                    metrics_slug = {}\n\n                    h3d = train_loader.dataset.episode_house\n\n                    # evaluate at multiple initializations\n                    # for i in [10, 30, 50]:\n\n                    t += 1\n\n                    question_var = Variable(question.cuda())\n\n                    controller_step = False\n                    planner_hidden = nav_model.planner_nav_rnn.init_hidden(1)\n\n                    # forward through planner till spawn\n                    (\n                        planner_actions_in, planner_img_feats,\n                        controller_step, controller_action_in,\n                        controller_img_feat, init_pos,\n          
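# (added note) get_hierarchical_features_till_spawn appears to replay the expert\n                        # demonstration up to a spawn index of max(3, mult * demo length) and return\n                        # the planner / controller histories plus the spawn pose and counter state:\n          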
              controller_action_counter\n                    ) = train_loader.dataset.get_hierarchical_features_till_spawn(\n                        actions[0, :action_length[0] + 1].numpy(), max(3, int(mult * action_length[0])), args.max_controller_actions\n                    )\n\n                    planner_actions_in_var = Variable(\n                        planner_actions_in.cuda())\n                    planner_img_feats_var = Variable(planner_img_feats.cuda())\n\n                    for step in range(planner_actions_in.size(0)):\n\n                        planner_scores, planner_hidden = nav_model.planner_step(\n                            question_var, planner_img_feats_var[step].view(\n                                1, 1, 3200), planner_actions_in_var[step].view(\n                                    1, 1), planner_hidden)\n\n                    if controller_step == True:\n\n                        controller_img_feat_var = Variable(\n                            controller_img_feat.cuda())\n                        controller_action_in_var = Variable(\n                            torch.LongTensor(1, 1).fill_(\n                                int(controller_action_in)).cuda())\n\n                        controller_scores = nav_model.controller_step(\n                            controller_img_feat_var.view(1, 1, 3200),\n                            controller_action_in_var.view(1, 1),\n                            planner_hidden[0])\n\n                        prob = F.softmax(controller_scores, dim=1)\n                        controller_action = int(\n                            prob.max(1)[1].data.cpu().numpy()[0])\n\n                        if controller_action == 1:\n                            controller_step = True\n                        else:\n                            controller_step = False\n\n                        action = int(controller_action_in)\n                        action_in = torch.LongTensor(\n                            1, 1).fill_(action + 1).cuda()\n\n                    else:\n\n                        prob = F.softmax(planner_scores, dim=1)\n                        action = int(prob.max(1)[1].data.cpu().numpy()[0])\n\n                        action_in = torch.LongTensor(\n                            1, 1).fill_(action + 1).cuda()\n\n                    h3d.env.reset(\n                        x=init_pos[0], y=init_pos[2], yaw=init_pos[3])\n\n                    init_dist_to_target = h3d.get_dist_to_target(\n                        h3d.env.cam.pos)\n                    if init_dist_to_target < 0:  # unreachable\n                        # invalids.append([idx[0], i])\n                        continue\n\n                    episode_length = 0\n                    episode_done = True\n                    controller_action_counter = 0\n\n                    dists_to_target, pos_queue = [init_dist_to_target], [\n                        init_pos\n                    ]\n\n                    rewards, planner_actions, planner_log_probs, controller_actions, controller_log_probs = [], [], [], [], []\n\n                    if action != 3:\n\n                        # take the first step\n                        img, rwd, episode_done = h3d.step(action, step_reward=True)\n                        img = torch.from_numpy(img.transpose(\n                            2, 0, 1)).float() / 255.0\n                        img_feat_var = train_loader.dataset.cnn(\n                            Variable(img.view(1, 3, 224, 224).cuda())).view(\n                                1, 1, 
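# (added) reshaped to a single (batch=1, step=1, 3200-dim) frame feature\n                                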
3200)\n\n                        for step in range(args.max_episode_length):\n\n                            episode_length += 1\n\n                            if controller_step == False:\n                                planner_scores, planner_hidden = nav_model.planner_step(\n                                    question_var, img_feat_var,\n                                    Variable(action_in), planner_hidden)\n\n                                planner_prob = F.softmax(planner_scores, dim=1)\n                                planner_log_prob = F.log_softmax(\n                                    planner_scores, dim=1)\n\n                                action = planner_prob.multinomial().data\n                                planner_log_prob = planner_log_prob.gather(\n                                    1, Variable(action))\n\n                                planner_log_probs.append(\n                                    planner_log_prob.cpu())\n\n                                action = int(action.cpu().numpy()[0, 0])\n                                planner_actions.append(action)\n\n                            img, rwd, episode_done = h3d.step(action, step_reward=True)\n\n                            episode_done = episode_done or episode_length >= args.max_episode_length\n\n                            rewards.append(rwd)\n\n                            img = torch.from_numpy(img.transpose(\n                                2, 0, 1)).float() / 255.0\n                            img_feat_var = train_loader.dataset.cnn(\n                                Variable(img.view(1, 3, 224, 224)\n                                         .cuda())).view(1, 1, 3200)\n\n                            dists_to_target.append(\n                                h3d.get_dist_to_target(h3d.env.cam.pos))\n                            pos_queue.append([\n                                h3d.env.cam.pos.x, h3d.env.cam.pos.y,\n                                h3d.env.cam.pos.z, h3d.env.cam.yaw\n                            ])\n\n                            if episode_done == True:\n                                break\n\n                            # query controller to continue or not\n                            controller_action_in = Variable(\n                                torch.LongTensor(1, 1).fill_(action).cuda())\n                            controller_scores = nav_model.controller_step(\n                                img_feat_var, controller_action_in,\n                                planner_hidden[0])\n\n                            controller_prob = F.softmax(\n                                controller_scores, dim=1)\n                            controller_log_prob = F.log_softmax(\n                                controller_scores, dim=1)\n\n                            controller_action = controller_prob.multinomial(\n                            ).data\n\n                            if int(controller_action[0]\n                                   ) == 1 and controller_action_counter < 4:\n                                controller_action_counter += 1\n                                controller_step = True\n                            else:\n                                controller_action_counter = 0\n                                controller_step = False\n                                controller_action.fill_(0)\n\n                            controller_log_prob = controller_log_prob.gather(\n                                1, Variable(controller_action))\n                            
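# (added note) keep the log-prob of the possibly-overridden controller action;\n                            # the REINFORCE update further down weights it by the discounted advantage.\n                            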
controller_log_probs.append(\n                                controller_log_prob.cpu())\n\n                            controller_action = int(\n                                controller_action.cpu().numpy()[0, 0])\n                            controller_actions.append(controller_action)\n                            action_in = torch.LongTensor(\n                                1, 1).fill_(action + 1).cuda()\n\n                    # run answerer here\n                    ans_acc = [0]\n                    if action == 3:\n                        if len(pos_queue) < 5:\n                            pos_queue = train_loader.dataset.episode_pos_queue[len(\n                                pos_queue) - 5:] + pos_queue\n                        images = train_loader.dataset.get_frames(\n                            h3d, pos_queue[-5:], preprocess=True)\n                        images_var = Variable(\n                            torch.from_numpy(images).cuda()).view(\n                                1, 5, 3, 224, 224)\n                        scores, att_probs = ans_model(images_var, question_var)\n                        ans_acc, ans_rank = vqa_metrics.compute_ranks(\n                            scores.data.cpu(), answer)\n                        vqa_metrics.update([ans_acc, ans_rank, 1.0 / ans_rank])\n\n                    rewards.append(h3d.success_reward * ans_acc[0])\n\n                    R = torch.zeros(1, 1)\n\n                    planner_loss = 0\n                    controller_loss = 0\n\n                    planner_rev_idx = -1\n                    for i in reversed(range(len(rewards))):\n                        R = 0.99 * R + rewards[i]\n                        advantage = R - nav_metrics.metrics[2][1]\n\n                        if i < len(controller_actions):\n                            controller_loss = controller_loss - controller_log_probs[i] * Variable(\n                                advantage)\n\n                            if controller_actions[i] == 0 and planner_rev_idx + len(planner_log_probs) >= 0:\n                                planner_loss = planner_loss - planner_log_probs[planner_rev_idx] * Variable(\n                                    advantage)\n                                planner_rev_idx -= 1\n\n                        elif planner_rev_idx + len(planner_log_probs) >= 0:\n\n                            planner_loss = planner_loss - planner_log_probs[planner_rev_idx] * Variable(\n                                advantage)\n                            planner_rev_idx -= 1\n\n                    controller_loss /= max(1, len(controller_log_probs))\n                    planner_loss /= max(1, len(planner_log_probs))\n\n                    optim.zero_grad()\n\n                    if isinstance(planner_loss, float) == False and isinstance(\n                            controller_loss, float) == False:\n                        p_losses.append(planner_loss.data[0, 0])\n                        c_losses.append(controller_loss.data[0, 0])\n                        reward_list.append(np.sum(rewards))\n                        episode_length_list.append(episode_length)\n\n                        (planner_loss + controller_loss).backward()\n\n                        ensure_shared_grads(nav_model.cpu(), shared_nav_model)\n                        optim.step()\n\n                    if len(reward_list) > 50:\n\n                        nav_metrics.update([\n                            p_losses, c_losses, reward_list,\n                            episode_length_list\n               
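# (added) running windows flushed roughly every 50 episodes; the mean reward\n                        # also drives the spawn-distance multiplier mult updated just below\n               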
         ])\n\n                        print(nav_metrics.get_stat_string())\n                        if args.log == True:\n                            nav_metrics.dump_log()\n\n                        if nav_metrics.metrics[2][1] > 0.35:\n                            mult = min(mult + 0.1, 1.0)\n\n                        p_losses, c_losses, reward_list, episode_length_list = [], [], [], []\n\n                if all_envs_loaded == False:\n                    train_loader.dataset._load_envs(in_order=True)\n                    if len(train_loader.dataset.pruned_env_set) == 0:\n                        done = True\n                        if args.cache == False:\n                            train_loader.dataset._load_envs(\n                                start_idx=0, in_order=True)\n                else:\n                    done = True\n\n        epoch += 1\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    # data params\n    parser.add_argument('-train_h5', default='data/train.h5')\n    parser.add_argument('-val_h5', default='data/val.h5')\n    parser.add_argument('-test_h5', default='data/test.h5')\n    parser.add_argument('-data_json', default='data/data.json')\n    parser.add_argument('-vocab_json', default='data/vocab.json')\n\n    parser.add_argument(\n        '-target_obj_conn_map_dir',\n        default='/path/to/target-obj-conn-maps/500')\n    parser.add_argument('-map_resolution', default=500, type=int)\n\n    parser.add_argument(\n        '-mode',\n        default='train+eval',\n        type=str,\n        choices=['train', 'eval', 'train+eval'])\n    parser.add_argument('-eval_split', default='val', type=str)\n\n    # model details\n    parser.add_argument(\n        '-model_type',\n        default='pacman',\n        choices=['cnn', 'cnn+q', 'lstm', 'lstm+q', 'pacman'])\n    parser.add_argument('-max_episode_length', default=100, type=int)\n\n    # optim params\n    parser.add_argument('-batch_size', default=20, type=int)\n    parser.add_argument('-learning_rate', default=1e-5, type=float)\n    parser.add_argument('-max_epochs', default=1000, type=int)\n\n    # bookkeeping\n    parser.add_argument('-print_every', default=5, type=int)\n    parser.add_argument('-eval_every', default=1, type=int)\n    parser.add_argument('-identifier', default='cnn')\n    parser.add_argument('-num_processes', default=1, type=int)\n    parser.add_argument('-max_threads_per_gpu', default=10, type=int)\n\n    # checkpointing\n    parser.add_argument('-nav_checkpoint_path', default=False)\n    parser.add_argument('-ans_checkpoint_path', default=False)\n\n    parser.add_argument('-checkpoint_dir', default='checkpoints/eqa/')\n    parser.add_argument('-log_dir', default='logs/eqa/')\n    parser.add_argument('-log', default=False, action='store_true')\n    parser.add_argument('-cache', default=False, action='store_true')\n    parser.add_argument('-max_controller_actions', type=int, default=5)\n    parser.add_argument('-max_actions', type=int)\n    args = parser.parse_args()\n\n    args.time_id = time.strftime(\"%m_%d_%H:%M\")\n\n    try:\n        args.gpus = os.environ['CUDA_VISIBLE_DEVICES'].split(',')\n        args.gpus = [int(x) for x in args.gpus]\n    except KeyError:\n        print(\"CPU not supported\")\n        exit()\n\n    # Load navigation model\n    if args.nav_checkpoint_path != False:\n        print('Loading navigation checkpoint from %s' % args.nav_checkpoint_path)\n        checkpoint = torch.load(\n            args.nav_checkpoint_path, map_location={\n                
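# (added) remap storages saved on cuda:0 to the CPU; each worker later moves the model to its own GPU\n                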
'cuda:0': 'cpu'\n            })\n\n        args_to_keep = ['model_type']\n\n        for i in args.__dict__:\n            if i not in args_to_keep:\n                checkpoint['args'][i] = args.__dict__[i]\n\n        args = type('new_dict', (object, ), checkpoint['args'])\n\n    args.checkpoint_dir = os.path.join(args.checkpoint_dir,\n                                       args.time_id + '_' + args.identifier)\n    args.log_dir = os.path.join(args.log_dir,\n                                args.time_id + '_' + args.identifier)\n    print(args.__dict__)\n\n    if not os.path.exists(args.checkpoint_dir) and args.log == True:\n        os.makedirs(args.checkpoint_dir)\n        os.makedirs(args.log_dir)\n\n    if args.model_type == 'pacman':\n\n        model_kwargs = {'question_vocab': load_vocab(args.vocab_json)}\n        shared_nav_model = NavPlannerControllerModel(**model_kwargs)\n\n    else:\n\n        exit()\n\n    shared_nav_model.share_memory()\n\n    if args.nav_checkpoint_path != False:\n        print('Loading navigation params from checkpoint: %s' %\n            args.nav_checkpoint_path)\n        shared_nav_model.load_state_dict(checkpoint['state'])\n\n    # Load answering model\n    if args.ans_checkpoint_path != False:\n        print('Loading answering checkpoint from %s' % args.ans_checkpoint_path)\n        ans_checkpoint = torch.load(\n            args.ans_checkpoint_path, map_location={\n                'cuda:0': 'cpu'\n            })\n\n    ans_model_kwargs = {'vocab': load_vocab(args.vocab_json)}\n    shared_ans_model = VqaLstmCnnAttentionModel(**ans_model_kwargs)\n\n    shared_ans_model.share_memory()\n\n    if args.ans_checkpoint_path != False:\n        print('Loading params from checkpoint: %s' % args.ans_checkpoint_path)\n        shared_ans_model.load_state_dict(ans_checkpoint['state'])\n\n    if args.mode == 'eval':\n\n        eval(0, args, shared_nav_model, shared_ans_model)\n\n    elif args.mode == 'train':\n\n        train(0, args, shared_nav_model, shared_ans_model)\n\n    else:\n\n        processes = []\n\n        p = mp.Process(\n            target=eval, args=(0, args, shared_nav_model, shared_ans_model))\n        p.start()\n        processes.append(p)\n\n        for rank in range(1, args.num_processes + 1):\n            p = mp.Process(\n                target=train,\n                args=(rank, args, shared_nav_model, shared_ans_model))\n            p.start()\n            processes.append(p)\n\n        for p in processes:\n            p.join()"
  },
  {
    "path": "training/train_nav.py",
    "content": "import time\nimport argparse\nfrom datetime import datetime\nimport logging\nimport numpy as np\nimport os\nimport torch\nimport torch.nn.functional as F\nimport torch.multiprocessing as mp\nfrom models import NavCnnModel, NavCnnRnnModel, NavCnnRnnMultModel, NavPlannerControllerModel\nfrom data import EqaDataLoader\nfrom metrics import NavMetric\nfrom models import MaskedNLLCriterion\nfrom models import get_state, ensure_shared_grads\nfrom data import load_vocab\nfrom torch.autograd import Variable\nfrom tqdm import tqdm\nimport time\n\ntorch.backends.cudnn.enabled = False\n\n################################################################################################\n#make models trained in pytorch 4 compatible with earlier pytorch versions\nimport torch._utils\ntry:\n    torch._utils._rebuild_tensor_v2\nexcept AttributeError:\n    def _rebuild_tensor_v2(storage, storage_offset, size, stride, requires_grad, backward_hooks):\n        tensor = torch._utils._rebuild_tensor(storage, storage_offset, size, stride)\n        tensor.requires_grad = requires_grad\n        tensor._backward_hooks = backward_hooks\n        return tensor\n    torch._utils._rebuild_tensor_v2 = _rebuild_tensor_v2\n\n################################################################################################\n\ndef eval(rank, args, shared_model):\n\n    torch.cuda.set_device(args.gpus.index(args.gpus[rank % len(args.gpus)]))\n\n    if args.model_type == 'cnn':\n\n        model_kwargs = {}\n        model = NavCnnModel(**model_kwargs)\n\n    elif args.model_type == 'cnn+q':\n\n        model_kwargs = {\n            'question_input': True,\n            'question_vocab': load_vocab(args.vocab_json)\n        }\n        model = NavCnnModel(**model_kwargs)\n\n    elif args.model_type == 'lstm':\n\n        model_kwargs = {}\n        model = NavCnnRnnModel(**model_kwargs)\n\n    elif args.model_type == 'lstm+q':\n\n        model_kwargs = {\n            'question_input': True,\n            'question_vocab': load_vocab(args.vocab_json)\n        }\n        model = NavCnnRnnModel(**model_kwargs)\n\n    elif args.model_type == 'lstm-mult+q':\n\n        model_kwargs = {\n            'question_input': True,\n            'question_vocab': load_vocab(args.vocab_json)\n        }\n        model = NavCnnRnnMultModel(**model_kwargs)\n\n    elif args.model_type == 'pacman':\n\n        model_kwargs = {'question_vocab': load_vocab(args.vocab_json)}\n        model = NavPlannerControllerModel(**model_kwargs)\n\n    else:\n\n        exit()\n\n    eval_loader_kwargs = {\n        'questions_h5': getattr(args, args.eval_split + '_h5'),\n        'data_json': args.data_json,\n        'vocab': args.vocab_json,\n        'target_obj_conn_map_dir': args.target_obj_conn_map_dir,\n        'map_resolution': args.map_resolution,\n        'batch_size': 1,\n        'input_type': args.model_type,\n        'num_frames': 5,\n        'split': args.eval_split,\n        'max_threads_per_gpu': args.max_threads_per_gpu,\n        'gpu_id': args.gpus[rank % len(args.gpus)],\n        'to_cache': False,\n        'overfit': args.overfit,\n        'max_controller_actions': args.max_controller_actions,\n    }\n\n    eval_loader = EqaDataLoader(**eval_loader_kwargs)\n    print('eval_loader has %d samples' % len(eval_loader.dataset))\n    logging.info(\"EVAL: eval_loader has {} samples\".format(len(eval_loader.dataset)))\n\n    args.output_log_path = os.path.join(args.log_dir,\n                                        'eval_' + str(rank) + '.json')\n\n    t, 
epoch, best_eval_acc = 0, 0, 0.0\n\n    max_epochs = args.max_epochs\n    if args.mode == 'eval':\n        max_epochs = 1\n    while epoch < int(max_epochs):\n\n        invalids = []\n\n        model.load_state_dict(shared_model.state_dict())\n        model.eval()\n\n        # that's a lot of numbers\n        metrics = NavMetric(\n            info={'split': args.eval_split,\n                  'thread': rank},\n            metric_names=[\n                'd_0_10', 'd_0_30', 'd_0_50', 'd_T_10', 'd_T_30', 'd_T_50',\n                'd_D_10', 'd_D_30', 'd_D_50', 'd_min_10', 'd_min_30',\n                'd_min_50', 'r_T_10', 'r_T_30', 'r_T_50', 'r_e_10', 'r_e_30',\n                'r_e_50', 'stop_10', 'stop_30', 'stop_50', 'ep_len_10',\n                'ep_len_30', 'ep_len_50'\n            ],\n            log_json=args.output_log_path)\n\n        if 'cnn' in args.model_type:\n\n            done = False\n\n            while done == False:\n\n                for batch in tqdm(eval_loader):\n\n                    model.load_state_dict(shared_model.state_dict())\n                    model.cuda()\n\n                    idx, questions, _, img_feats, actions_in, actions_out, action_length = batch\n                    metrics_slug = {}\n\n                    # evaluate at multiple initializations\n                    for i in [10, 30, 50]:\n\n                        t += 1\n\n                        if action_length[0] + 1 - i - 5 < 0:\n                            invalids.append(idx[0])\n                            continue\n\n                        ep_inds = [\n                            x for x in range(action_length[0] + 1 - i - 5,\n                                             action_length[0] + 1 - i)\n                        ]\n\n                        sub_img_feats = torch.index_select(\n                            img_feats, 1, torch.LongTensor(ep_inds))\n\n                        init_pos = eval_loader.dataset.episode_pos_queue[\n                            ep_inds[-1]]\n\n                        h3d = eval_loader.dataset.episode_house\n\n                        h3d.env.reset(\n                            x=init_pos[0], y=init_pos[2], yaw=init_pos[3])\n\n                        init_dist_to_target = h3d.get_dist_to_target(\n                            h3d.env.cam.pos)\n                        if init_dist_to_target < 0:  # unreachable\n                            invalids.append(idx[0])\n                            continue\n\n                        sub_img_feats_var = Variable(sub_img_feats.cuda())\n                        if '+q' in args.model_type:\n                            questions_var = Variable(questions.cuda())\n\n                        # sample actions till max steps or <stop>\n                        # max no. 
of actions = 100\n\n                        episode_length = 0\n                        episode_done = True\n\n                        dists_to_target, pos_queue, actions = [\n                            init_dist_to_target\n                        ], [init_pos], []\n\n                        for step in range(args.max_episode_length):\n\n                            episode_length += 1\n\n                            if '+q' in args.model_type:\n                                scores = model(sub_img_feats_var,\n                                               questions_var)\n                            else:\n                                scores = model(sub_img_feats_var)\n\n                            prob = F.softmax(scores, dim=1)\n\n                            action = int(prob.max(1)[1].data.cpu().numpy()[0])\n\n                            actions.append(action)\n\n                            img, _, episode_done = h3d.step(action)\n\n                            episode_done = episode_done or episode_length >= args.max_episode_length\n\n                            img = torch.from_numpy(img.transpose(\n                                2, 0, 1)).float() / 255.0\n                            img_feat_var = eval_loader.dataset.cnn(\n                                Variable(img.view(1, 3, 224, 224)\n                                         .cuda())).view(1, 1, 3200)\n                            sub_img_feats_var = torch.cat(\n                                [sub_img_feats_var, img_feat_var], dim=1)\n                            sub_img_feats_var = sub_img_feats_var[:, -5:, :]\n\n                            dists_to_target.append(\n                                h3d.get_dist_to_target(h3d.env.cam.pos))\n                            pos_queue.append([\n                                h3d.env.cam.pos.x, h3d.env.cam.pos.y,\n                                h3d.env.cam.pos.z, h3d.env.cam.yaw\n                            ])\n\n                            if episode_done == True:\n                                break\n\n                        # compute stats\n                        metrics_slug['d_0_' + str(i)] = dists_to_target[0]\n                        metrics_slug['d_T_' + str(i)] = dists_to_target[-1]\n                        metrics_slug['d_D_' + str(\n                            i)] = dists_to_target[0] - dists_to_target[-1]\n                        metrics_slug['d_min_' + str(i)] = np.array(\n                            dists_to_target).min()\n                        metrics_slug['ep_len_' + str(i)] = episode_length\n                        if action == 3:\n                            metrics_slug['stop_' + str(i)] = 1\n                        else:\n                            metrics_slug['stop_' + str(i)] = 0\n                        inside_room = []\n                        for p in pos_queue:\n                            inside_room.append(\n                                h3d.is_inside_room(\n                                    p, eval_loader.dataset.target_room))\n                        if inside_room[-1] == True:\n                            metrics_slug['r_T_' + str(i)] = 1\n                        else:\n                            metrics_slug['r_T_' + str(i)] = 0\n                        if any([x == True for x in inside_room]) == True:\n                            metrics_slug['r_e_' + str(i)] = 1\n                        else:\n                            metrics_slug['r_e_' + str(i)] = 0\n\n                    # collate and update metrics\n                    metrics_list 
= []\n                    for i in metrics.metric_names:\n                        if i not in metrics_slug:\n                            metrics_list.append(metrics.metrics[\n                                metrics.metric_names.index(i)][0])\n                        else:\n                            metrics_list.append(metrics_slug[i])\n\n                    # update metrics\n                    metrics.update(metrics_list)\n\n                print(metrics.get_stat_string(mode=0))\n                print('invalids', len(invalids))\n                logging.info(\"EVAL: metrics: {}\".format(metrics.get_stat_string(mode=0)))\n                logging.info(\"EVAL: invalids: {}\".format(len(invalids)))\n\n               # del h3d\n                eval_loader.dataset._load_envs()\n                if len(eval_loader.dataset.pruned_env_set) == 0:\n                    done = True\n\n        elif 'lstm' in args.model_type:\n\n            done = False\n\n            while done == False:\n\n                if args.overfit:\n                    metrics = NavMetric(\n                        info={'split': args.eval_split,\n                              'thread': rank},\n                        metric_names=[\n                            'd_0_10', 'd_0_30', 'd_0_50', 'd_T_10', 'd_T_30', 'd_T_50',\n                            'd_D_10', 'd_D_30', 'd_D_50', 'd_min_10', 'd_min_30',\n                            'd_min_50', 'r_T_10', 'r_T_30', 'r_T_50', 'r_e_10', 'r_e_30',\n                            'r_e_50', 'stop_10', 'stop_30', 'stop_50', 'ep_len_10',\n                            'ep_len_30', 'ep_len_50'\n                        ],\n                        log_json=args.output_log_path)\n\n                for batch in tqdm(eval_loader):\n\n                    model.load_state_dict(shared_model.state_dict())\n                    model.cuda()\n\n                    idx, questions, answer, _, actions_in, actions_out, action_lengths, _ = batch\n                    question_var = Variable(questions.cuda())\n                    metrics_slug = {}\n\n                    # evaluate at multiple initializations\n                    for i in [10, 30, 50]:\n\n                        t += 1\n\n                        if action_lengths[0] - 1 - i < 0:\n                            invalids.append([idx[0], i])\n                            continue\n\n                        h3d = eval_loader.dataset.episode_house\n\n                        # forward through lstm till spawn\n                        if len(eval_loader.dataset.episode_pos_queue[:-i]\n                               ) > 0:\n                            images = eval_loader.dataset.get_frames(\n                                h3d,\n                                eval_loader.dataset.episode_pos_queue[:-i],\n                                preprocess=True)\n                            raw_img_feats = eval_loader.dataset.cnn(\n                                Variable(torch.FloatTensor(images).cuda()))\n\n                            actions_in_pruned = actions_in[:, :\n                                                           action_lengths[0] -\n                                                           i]\n                            actions_in_var = Variable(actions_in_pruned.cuda())\n                            action_lengths_pruned = action_lengths.clone(\n                            ).fill_(action_lengths[0] - i)\n                            img_feats_var = raw_img_feats.view(1, -1, 3200)\n\n                            if '+q' in args.model_type:\n            
                    scores, hidden = model(\n                                    img_feats_var, question_var,\n                                    actions_in_var,\n                                    action_lengths_pruned.cpu().numpy())\n                            else:\n                                scores, hidden = model(\n                                    img_feats_var, False, actions_in_var,\n                                    action_lengths_pruned.cpu().numpy())\n                            try:\n                                init_pos = eval_loader.dataset.episode_pos_queue[\n                                    -i]\n                            except:\n                                invalids.append([idx[0], i])\n                                continue\n\n                            action_in = torch.LongTensor(1, 1).fill_(\n                                actions_in[0,\n                                           action_lengths[0] - i]).cuda()\n                        else:\n                            init_pos = eval_loader.dataset.episode_pos_queue[\n                                -i]\n                            hidden = model.nav_rnn.init_hidden(1)\n                            action_in = torch.LongTensor(1, 1).fill_(0).cuda()\n\n                        h3d.env.reset(\n                            x=init_pos[0], y=init_pos[2], yaw=init_pos[3])\n\n                        init_dist_to_target = h3d.get_dist_to_target(\n                            h3d.env.cam.pos)\n                        if init_dist_to_target < 0:  # unreachable\n                            invalids.append([idx[0], i])\n                            continue\n\n                        img = h3d.env.render()\n                        img = torch.from_numpy(img.transpose(\n                            2, 0, 1)).float() / 255.0\n                        img_feat_var = eval_loader.dataset.cnn(\n                            Variable(img.view(1, 3, 224, 224).cuda())).view(\n                                1, 1, 3200)\n\n                        episode_length = 0\n                        episode_done = True\n\n                        dists_to_target, pos_queue, actions = [\n                            init_dist_to_target\n                        ], [init_pos], []\n                        actual_pos_queue = [(h3d.env.cam.pos.x, h3d.env.cam.pos.z, h3d.env.cam.yaw)]\n\n                        for step in range(args.max_episode_length):\n\n                            episode_length += 1\n\n                            if '+q' in args.model_type:\n                                scores, hidden = model(\n                                    img_feat_var,\n                                    question_var,\n                                    Variable(action_in),\n                                    False,\n                                    hidden=hidden,\n                                    step=True)\n                            else:\n                                scores, hidden = model(\n                                    img_feat_var,\n                                    False,\n                                    Variable(action_in),\n                                    False,\n                                    hidden=hidden,\n                                    step=True)\n\n                            prob = F.softmax(scores, dim=1)\n\n                            action = int(prob.max(1)[1].data.cpu().numpy()[0])\n\n                            actions.append(action)\n\n                            img, _, 
episode_done = h3d.step(action)\n\n                            episode_done = episode_done or episode_length >= args.max_episode_length\n\n                            img = torch.from_numpy(img.transpose(\n                                2, 0, 1)).float() / 255.0\n                            img_feat_var = eval_loader.dataset.cnn(\n                                Variable(img.view(1, 3, 224, 224)\n                                         .cuda())).view(1, 1, 3200)\n\n                            action_in = torch.LongTensor(\n                                1, 1).fill_(action + 1).cuda()\n\n                            dists_to_target.append(\n                                h3d.get_dist_to_target(h3d.env.cam.pos))\n                            pos_queue.append([\n                                h3d.env.cam.pos.x, h3d.env.cam.pos.y,\n                                h3d.env.cam.pos.z, h3d.env.cam.yaw\n                            ])\n\n                            if episode_done == True:\n                                break\n\n                            actual_pos_queue.append([\n                                h3d.env.cam.pos.x, h3d.env.cam.pos.z, h3d.env.cam.yaw])\n\n                        # compute stats\n                        metrics_slug['d_0_' + str(i)] = dists_to_target[0]\n                        metrics_slug['d_T_' + str(i)] = dists_to_target[-1]\n                        metrics_slug['d_D_' + str(\n                            i)] = dists_to_target[0] - dists_to_target[-1]\n                        metrics_slug['d_min_' + str(i)] = np.array(\n                            dists_to_target).min()\n                        metrics_slug['ep_len_' + str(i)] = episode_length\n                        if action == 3:\n                            metrics_slug['stop_' + str(i)] = 1\n                        else:\n                            metrics_slug['stop_' + str(i)] = 0\n                        inside_room = []\n                        for p in pos_queue:\n                            inside_room.append(\n                                h3d.is_inside_room(\n                                    p, eval_loader.dataset.target_room))\n                        if inside_room[-1] == True:\n                            metrics_slug['r_T_' + str(i)] = 1\n                        else:\n                            metrics_slug['r_T_' + str(i)] = 0\n                        if any([x == True for x in inside_room]) == True:\n                            metrics_slug['r_e_' + str(i)] = 1\n                        else:\n                            metrics_slug['r_e_' + str(i)] = 0\n\n                    # collate and update metrics\n                    metrics_list = []\n                    for i in metrics.metric_names:\n                        if i not in metrics_slug:\n                            metrics_list.append(metrics.metrics[\n                                metrics.metric_names.index(i)][0])\n                        else:\n                            metrics_list.append(metrics_slug[i])\n\n                    # update metrics\n                    metrics.update(metrics_list)\n\n                print(metrics.get_stat_string(mode=0))\n                print('invalids', len(invalids))\n                logging.info(\"EVAL: init_steps: {} metrics: {}\".format(i, metrics.get_stat_string(mode=0)))\n                logging.info(\"EVAL: init_steps: {} invalids: {}\".format(i, len(invalids)))\n\n                # del h3d\n                eval_loader.dataset._load_envs()\n                print(\"eval_loader 
pruned_env_set len: {}\".format(len(eval_loader.dataset.pruned_env_set)))\n                logging.info(\"eval_loader pruned_env_set len: {}\".format(len(eval_loader.dataset.pruned_env_set)))\n                assert len(eval_loader.dataset.pruned_env_set) > 0\n                if len(eval_loader.dataset.pruned_env_set) == 0:\n                    done = True\n\n        elif 'pacman' in args.model_type:\n\n            done = False\n\n            while done == False:\n                if args.overfit:\n                    metrics = NavMetric(\n                        info={'split': args.eval_split,\n                              'thread': rank},\n                        metric_names=[\n                            'd_0_10', 'd_0_30', 'd_0_50', 'd_T_10', 'd_T_30', 'd_T_50',\n                            'd_D_10', 'd_D_30', 'd_D_50', 'd_min_10', 'd_min_30',\n                            'd_min_50', 'r_T_10', 'r_T_30', 'r_T_50', 'r_e_10', 'r_e_30',\n                            'r_e_50', 'stop_10', 'stop_30', 'stop_50', 'ep_len_10',\n                            'ep_len_30', 'ep_len_50'\n                        ],\n                        log_json=args.output_log_path)\n\n                for batch in tqdm(eval_loader):\n\n                    model.load_state_dict(shared_model.state_dict())\n                    model.cuda()\n\n                    idx, question, answer, actions, action_length = batch\n                    metrics_slug = {}\n\n                    h3d = eval_loader.dataset.episode_house\n\n                    # evaluate at multiple initializations\n                    for i in [10, 30, 50]:\n\n                        t += 1\n\n                        if i > action_length[0]:\n                            invalids.append([idx[0], i])\n                            continue\n\n                        question_var = Variable(question.cuda())\n\n                        controller_step = False\n                        planner_hidden = model.planner_nav_rnn.init_hidden(1)\n\n                        # get hierarchical action history\n                        (\n                            planner_actions_in, planner_img_feats,\n                            controller_step, controller_action_in,\n                            controller_img_feats, init_pos,\n                            controller_action_counter\n                        ) = eval_loader.dataset.get_hierarchical_features_till_spawn(\n                            actions[0, :action_length[0] + 1].numpy(), i, args.max_controller_actions\n                        )\n\n                        planner_actions_in_var = Variable(\n                            planner_actions_in.cuda())\n                        planner_img_feats_var = Variable(\n                            planner_img_feats.cuda())\n\n                        # forward planner till spawn to update hidden state\n                        for step in range(planner_actions_in.size(0)):\n\n                            planner_scores, planner_hidden = model.planner_step(\n                                question_var, planner_img_feats_var[step]\n                                .unsqueeze(0).unsqueeze(0),\n                                planner_actions_in_var[step].view(1, 1),\n                                planner_hidden\n                            )\n\n                        h3d.env.reset(\n                            x=init_pos[0], y=init_pos[2], yaw=init_pos[3])\n\n                        init_dist_to_target = h3d.get_dist_to_target(\n                            h3d.env.cam.pos)\n       
                 if init_dist_to_target < 0:  # unreachable\n                            invalids.append([idx[0], i])\n                            continue\n\n                        dists_to_target, pos_queue, pred_actions = [\n                            init_dist_to_target\n                        ], [init_pos], []\n                        planner_actions, controller_actions = [], []\n\n                        episode_length = 0\n                        if args.max_controller_actions > 1:\n                            controller_action_counter = controller_action_counter % args.max_controller_actions\n                            controller_action_counter = max(controller_action_counter - 1, 0)\n                        else:\n                            controller_action_counter = 0\n\n                        first_step = True\n                        first_step_is_controller = controller_step\n                        planner_step = True\n                        action = int(controller_action_in)\n\n                        for step in range(args.max_episode_length):\n                            if not first_step:\n                                img = torch.from_numpy(img.transpose(\n                                    2, 0, 1)).float() / 255.0\n                                img_feat_var = eval_loader.dataset.cnn(\n                                    Variable(img.view(1, 3, 224,\n                                                      224).cuda())).view(\n                                                          1, 1, 3200)\n                            else:\n                                img_feat_var = Variable(controller_img_feats.cuda()).view(1, 1, 3200)\n\n                            if not first_step or first_step_is_controller:\n                                # query controller to continue or not\n                                controller_action_in = Variable(\n                                    torch.LongTensor(1, 1).fill_(action).cuda())\n                                controller_scores = model.controller_step(\n                                    img_feat_var, controller_action_in,\n                                    planner_hidden[0])\n\n                                prob = F.softmax(controller_scores, dim=1)\n                                controller_action = int(\n                                    prob.max(1)[1].data.cpu().numpy()[0])\n\n                                if controller_action == 1 and controller_action_counter < args.max_controller_actions - 1:\n                                    controller_action_counter += 1\n                                    planner_step = False\n                                else:\n                                    controller_action_counter = 0\n                                    planner_step = True\n                                    controller_action = 0\n\n                                controller_actions.append(controller_action)\n                                first_step = False\n\n                            if planner_step:\n                                if not first_step:\n                                    action_in = torch.LongTensor(\n                                        1, 1).fill_(action + 1).cuda()\n                                    planner_scores, planner_hidden = model.planner_step(\n                                        question_var, img_feat_var,\n                                        Variable(action_in), planner_hidden)\n\n                                prob = F.softmax(planner_scores, 
dim=1)\n                                action = int(\n                                    prob.max(1)[1].data.cpu().numpy()[0])\n                                planner_actions.append(action)\n\n                            episode_done = action == 3 or episode_length >= args.max_episode_length\n\n                            episode_length += 1\n                            dists_to_target.append(\n                                h3d.get_dist_to_target(h3d.env.cam.pos))\n                            pos_queue.append([\n                                h3d.env.cam.pos.x, h3d.env.cam.pos.y,\n                                h3d.env.cam.pos.z, h3d.env.cam.yaw\n                            ])\n\n                            if episode_done:\n                                break\n\n                            img, _, _ = h3d.step(action)\n                            first_step = False\n\n                        # compute stats\n                        metrics_slug['d_0_' + str(i)] = dists_to_target[0]\n                        metrics_slug['d_T_' + str(i)] = dists_to_target[-1]\n                        metrics_slug['d_D_' + str(\n                            i)] = dists_to_target[0] - dists_to_target[-1]\n                        metrics_slug['d_min_' + str(i)] = np.array(\n                            dists_to_target).min()\n                        metrics_slug['ep_len_' + str(i)] = episode_length\n                        if action == 3:\n                            metrics_slug['stop_' + str(i)] = 1\n                        else:\n                            metrics_slug['stop_' + str(i)] = 0\n                        inside_room = []\n                        for p in pos_queue:\n                            inside_room.append(\n                                h3d.is_inside_room(\n                                    p, eval_loader.dataset.target_room))\n                        if inside_room[-1] == True:\n                            metrics_slug['r_T_' + str(i)] = 1\n                        else:\n                            metrics_slug['r_T_' + str(i)] = 0\n                        if any([x == True for x in inside_room]) == True:\n                            metrics_slug['r_e_' + str(i)] = 1\n                        else:\n                            metrics_slug['r_e_' + str(i)] = 0\n\n                    # collate and update metrics\n                    metrics_list = []\n                    for i in metrics.metric_names:\n                        if i not in metrics_slug:\n                            metrics_list.append(metrics.metrics[\n                                metrics.metric_names.index(i)][0])\n                        else:\n                            metrics_list.append(metrics_slug[i])\n\n                    # update metrics\n                    metrics.update(metrics_list)\n\n                try:\n                    print(metrics.get_stat_string(mode=0))\n                    logging.info(\"EVAL: metrics: {}\".format(metrics.get_stat_string(mode=0)))\n                except:\n                    pass\n\n                print('epoch', epoch)\n                print('invalids', len(invalids))\n                logging.info(\"EVAL: epoch {}\".format(epoch))\n                logging.info(\"EVAL: invalids {}\".format(invalids))\n\n                # del h3d\n                eval_loader.dataset._load_envs()\n                if len(eval_loader.dataset.pruned_env_set) == 0:\n                    done = True\n\n        epoch += 1\n\n        # checkpoint if best val loss\n        if 
metrics.metrics[8][0] > best_eval_acc:  # d_D_50\n            best_eval_acc = metrics.metrics[8][0]\n            if epoch % args.eval_every == 0 and args.log == True:\n                metrics.dump_log()\n\n                model_state = get_state(model)\n\n                aad = dict(args.__dict__)\n                ad = {}\n                for i in aad:\n                    if i[0] != '_':\n                        ad[i] = aad[i]\n\n                checkpoint = {'args': ad, 'state': model_state, 'epoch': epoch}\n\n                checkpoint_path = '%s/epoch_%d_d_D_50_%.04f.pt' % (\n                    args.checkpoint_dir, epoch, best_eval_acc)\n                print('Saving checkpoint to %s' % checkpoint_path)\n                logging.info(\"EVAL: Saving checkpoint to {}\".format(checkpoint_path))\n                torch.save(checkpoint, checkpoint_path)\n\n        print('[best_eval_d_D_50:%.04f]' % best_eval_acc)\n        logging.info(\"EVAL: [best_eval_d_D_50:{:.04f}]\".format(best_eval_acc))\n\n        eval_loader.dataset._load_envs(start_idx=0, in_order=True)\n\n\ndef train(rank, args, shared_model):\n    torch.cuda.set_device(args.gpus.index(args.gpus[rank % len(args.gpus)]))\n\n    if args.model_type == 'cnn':\n\n        model_kwargs = {}\n        model = NavCnnModel(**model_kwargs)\n\n    elif args.model_type == 'cnn+q':\n\n        model_kwargs = {\n            'question_input': True,\n            'question_vocab': load_vocab(args.vocab_json)\n        }\n        model = NavCnnModel(**model_kwargs)\n\n    elif args.model_type == 'lstm':\n\n        model_kwargs = {}\n        model = NavCnnRnnModel(**model_kwargs)\n\n    elif args.model_type == 'lstm-mult+q':\n\n        model_kwargs = {\n            'question_input': True,\n            'question_vocab': load_vocab(args.vocab_json)\n        }\n        model = NavCnnRnnMultModel(**model_kwargs)\n\n    elif args.model_type == 'lstm+q':\n\n        model_kwargs = {\n            'question_input': True,\n            'question_vocab': load_vocab(args.vocab_json)\n        }\n        model = NavCnnRnnModel(**model_kwargs)\n\n    elif args.model_type == 'pacman':\n\n        model_kwargs = {'question_vocab': load_vocab(args.vocab_json)}\n        model = NavPlannerControllerModel(**model_kwargs)\n\n    else:\n\n        exit()\n\n    lossFn = torch.nn.CrossEntropyLoss().cuda()\n\n    optim = torch.optim.Adamax(\n        filter(lambda p: p.requires_grad, shared_model.parameters()),\n        lr=args.learning_rate)\n\n    train_loader_kwargs = {\n        'questions_h5': args.train_h5,\n        'data_json': args.data_json,\n        'vocab': args.vocab_json,\n        'batch_size': args.batch_size,\n        'input_type': args.model_type,\n        'num_frames': 5,\n        'map_resolution': args.map_resolution,\n        'split': 'train',\n        'max_threads_per_gpu': args.max_threads_per_gpu,\n        'gpu_id': args.gpus[rank % len(args.gpus)],\n        'to_cache': args.cache,\n        'overfit': args.overfit,\n        'max_controller_actions': args.max_controller_actions,\n        'max_actions': args.max_actions\n    }\n\n    args.output_log_path = os.path.join(args.log_dir,\n                                        'train_' + str(rank) + '.json')\n\n    if 'pacman' in args.model_type:\n\n        metrics = NavMetric(\n            info={'split': 'train',\n                  'thread': rank},\n            metric_names=['planner_loss', 'controller_loss'],\n            log_json=args.output_log_path)\n\n    else:\n\n        metrics = NavMetric(\n            
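# (added) a single running imitation loss for the non-hierarchical cnn / lstm models\n            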
info={'split': 'train',\n                  'thread': rank},\n            metric_names=['loss'],\n            log_json=args.output_log_path)\n\n    train_loader = EqaDataLoader(**train_loader_kwargs)\n\n    print('train_loader has %d samples' % len(train_loader.dataset))\n    logging.info('TRAIN: train loader has {} samples'.format(len(train_loader.dataset)))\n\n    t, epoch = 0, 0\n\n    while epoch < int(args.max_epochs):\n\n        if 'cnn' in args.model_type:\n\n            done = False\n            all_envs_loaded = train_loader.dataset._check_if_all_envs_loaded()\n\n            while done == False:\n\n                for batch in train_loader:\n\n                    t += 1\n\n                    model.load_state_dict(shared_model.state_dict())\n                    model.train()\n                    model.cuda()\n\n                    idx, questions, _, img_feats, _, actions_out, _ = batch\n\n                    img_feats_var = Variable(img_feats.cuda())\n                    if '+q' in args.model_type:\n                        questions_var = Variable(questions.cuda())\n                    actions_out_var = Variable(actions_out.cuda())\n\n                    if '+q' in args.model_type:\n                        scores = model(img_feats_var, questions_var)\n                    else:\n                        scores = model(img_feats_var)\n\n                    loss = lossFn(scores, actions_out_var)\n\n                    # zero grad\n                    optim.zero_grad()\n\n                    # update metrics\n                    metrics.update([loss.data[0]])\n\n                    # backprop and update\n                    loss.backward()\n\n                    ensure_shared_grads(model.cpu(), shared_model)\n                    optim.step()\n\n                    if t % args.print_every == 0:\n                        print(metrics.get_stat_string())\n                        logging.info(\"TRAIN: metrics: {}\".format(metrics.get_stat_string()))\n                        if args.log == True:\n                            metrics.dump_log()\n\n                    print('[CHECK][Cache:%d][Total:%d]' %\n                          (len(train_loader.dataset.img_data_cache),\n                           len(train_loader.dataset.env_list)))\n                    logging.info('TRAIN: [CHECK][Cache:{}][Total:{}]'.format(\n                        len(train_loader.dataset.img_data_cache), len(train_loader.dataset.env_list)))\n\n                if all_envs_loaded == False:\n                    train_loader.dataset._load_envs(in_order=True)\n                    if len(train_loader.dataset.pruned_env_set) == 0:\n                        done = True\n                        if args.cache == False:\n                            train_loader.dataset._load_envs(\n                                start_idx=0, in_order=True)\n                else:\n                    done = True\n\n        elif 'lstm' in args.model_type:\n\n            lossFn = MaskedNLLCriterion().cuda()\n\n            done = False\n            all_envs_loaded = train_loader.dataset._check_if_all_envs_loaded()\n            total_times = []\n            while done == False:\n\n                start_time = time.time()\n                for batch in train_loader:\n\n                    t += 1\n\n\n                    model.load_state_dict(shared_model.state_dict())\n                    model.train()\n                    model.cuda()\n\n                    idx, questions, _, img_feats, actions_in, actions_out, action_lengths, masks = batch\n\n         
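# (added note) move the batch to the GPU; lengths are then sorted in descending\n                    # order, presumably so the RNN can consume the sequences in packed form.\n         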
           img_feats_var = Variable(img_feats.cuda())\n                    if '+q' in args.model_type:\n                        questions_var = Variable(questions.cuda())\n                    actions_in_var = Variable(actions_in.cuda())\n                    actions_out_var = Variable(actions_out.cuda())\n                    action_lengths = action_lengths.cuda()\n                    masks_var = Variable(masks.cuda())\n\n                    action_lengths, perm_idx = action_lengths.sort(\n                        0, descending=True)\n\n                    img_feats_var = img_feats_var[perm_idx]\n                    if '+q' in args.model_type:\n                        questions_var = questions_var[perm_idx]\n                    actions_in_var = actions_in_var[perm_idx]\n                    actions_out_var = actions_out_var[perm_idx]\n                    masks_var = masks_var[perm_idx]\n\n                    if '+q' in args.model_type:\n                        scores, hidden = model(img_feats_var, questions_var,\n                                               actions_in_var,\n                                               action_lengths.cpu().numpy())\n                    else:\n                        scores, hidden = model(img_feats_var, False,\n                                               actions_in_var,\n                                               action_lengths.cpu().numpy())\n\n                    #block out masks\n                    if args.curriculum:\n                        curriculum_length = (epoch+1)*5\n                        for i, action_length in enumerate(action_lengths):\n                            if action_length - curriculum_length > 0:\n                                masks_var[i, :action_length-curriculum_length] = 0\n\n                    logprob = F.log_softmax(scores, dim=1)\n                    loss = lossFn(\n                        logprob, actions_out_var[:, :action_lengths.max()]\n                        .contiguous().view(-1, 1),\n                        masks_var[:, :action_lengths.max()].contiguous().view(\n                            -1, 1))\n\n                    # zero grad\n                    optim.zero_grad()\n\n                    # update metrics\n                    metrics.update([loss.data[0]])\n                    logging.info(\"TRAIN LSTM loss: {:.6f}\".format(loss.data[0]))\n\n                    # backprop and update\n                    loss.backward()\n\n                    ensure_shared_grads(model.cpu(), shared_model)\n                    optim.step()\n\n                    if t % args.print_every == 0:\n                        print(metrics.get_stat_string())\n                        logging.info(\"TRAIN: metrics: {}\".format(metrics.get_stat_string()))\n                        if args.log == True:\n                            metrics.dump_log()\n\n                    print('[CHECK][Cache:%d][Total:%d]' %\n                          (len(train_loader.dataset.img_data_cache),\n                           len(train_loader.dataset.env_list)))\n                    logging.info('TRAIN: [CHECK][Cache:{}][Total:{}]'.format(\n                        len(train_loader.dataset.img_data_cache), len(train_loader.dataset.env_list)))\n\n\n                if all_envs_loaded == False:\n                    train_loader.dataset._load_envs(in_order=True)\n                    if len(train_loader.dataset.pruned_env_set) == 0:\n                        done = True\n                        if args.cache == False:\n                            
train_loader.dataset._load_envs(\n                                start_idx=0, in_order=True)\n                else:\n                    done = True\n\n        elif 'pacman' in args.model_type:\n\n            planner_lossFn = MaskedNLLCriterion().cuda()\n            controller_lossFn = MaskedNLLCriterion().cuda()\n\n            done = False\n            all_envs_loaded = train_loader.dataset._check_if_all_envs_loaded()\n\n            while done == False:\n\n                for batch in train_loader:\n\n                    t += 1\n\n                    model.load_state_dict(shared_model.state_dict())\n                    model.train()\n                    model.cuda()\n\n                    idx, questions, _, planner_img_feats, planner_actions_in, \\\n                        planner_actions_out, planner_action_lengths, planner_masks, \\\n                        controller_img_feats, controller_actions_in, planner_hidden_idx, \\\n                        controller_outs, controller_action_lengths, controller_masks = batch\n\n                    questions_var = Variable(questions.cuda())\n\n                    planner_img_feats_var = Variable(planner_img_feats.cuda())\n                    planner_actions_in_var = Variable(\n                        planner_actions_in.cuda())\n                    planner_actions_out_var = Variable(\n                        planner_actions_out.cuda())\n                    planner_action_lengths = planner_action_lengths.cuda()\n                    planner_masks_var = Variable(planner_masks.cuda())\n\n                    controller_img_feats_var = Variable(\n                        controller_img_feats.cuda())\n                    controller_actions_in_var = Variable(\n                        controller_actions_in.cuda())\n                    planner_hidden_idx_var = Variable(\n                        planner_hidden_idx.cuda())\n                    controller_outs_var = Variable(controller_outs.cuda())\n                    controller_action_lengths = controller_action_lengths.cuda(\n                    )\n                    controller_masks_var = Variable(controller_masks.cuda())\n\n                    planner_action_lengths, perm_idx = planner_action_lengths.sort(\n                        0, descending=True)\n\n                    questions_var = questions_var[perm_idx]\n\n                    planner_img_feats_var = planner_img_feats_var[perm_idx]\n                    planner_actions_in_var = planner_actions_in_var[perm_idx]\n                    planner_actions_out_var = planner_actions_out_var[perm_idx]\n                    planner_masks_var = planner_masks_var[perm_idx]\n\n                    controller_img_feats_var = controller_img_feats_var[\n                        perm_idx]\n                    controller_actions_in_var = controller_actions_in_var[\n                        perm_idx]\n                    controller_outs_var = controller_outs_var[perm_idx]\n                    planner_hidden_idx_var = planner_hidden_idx_var[perm_idx]\n                    controller_action_lengths = controller_action_lengths[\n                        perm_idx]\n                    controller_masks_var = controller_masks_var[perm_idx]\n\n                    planner_scores, controller_scores, planner_hidden = model(\n                        questions_var, planner_img_feats_var,\n                        planner_actions_in_var,\n                        planner_action_lengths.cpu().numpy(),\n                        planner_hidden_idx_var, controller_img_feats_var,\n               
         controller_actions_in_var, controller_action_lengths)\n\n                    planner_logprob = F.log_softmax(planner_scores, dim=1)\n                    controller_logprob = F.log_softmax(\n                        controller_scores, dim=1)\n\n                    planner_loss = planner_lossFn(\n                        planner_logprob,\n                        planner_actions_out_var[:, :planner_action_lengths.max(\n                        )].contiguous().view(-1, 1),\n                        planner_masks_var[:, :planner_action_lengths.max()]\n                        .contiguous().view(-1, 1))\n\n                    controller_loss = controller_lossFn(\n                        controller_logprob,\n                        controller_outs_var[:, :controller_action_lengths.max(\n                        )].contiguous().view(-1, 1),\n                        controller_masks_var[:, :controller_action_lengths.max(\n                        )].contiguous().view(-1, 1))\n\n                    # zero grad\n                    optim.zero_grad()\n\n                    # update metrics\n                    metrics.update(\n                        [planner_loss.data[0], controller_loss.data[0]])\n                    logging.info(\"TRAINING PACMAN planner-loss: {:.6f} controller-loss: {:.6f}\".format(\n                        planner_loss.data[0], controller_loss.data[0]))\n\n                    # backprop and update\n                    if args.max_controller_actions == 1:\n                        (planner_loss).backward()\n                    else:\n                        (planner_loss + controller_loss).backward()\n\n                    ensure_shared_grads(model.cpu(), shared_model)\n                    optim.step()\n\n                    if t % args.print_every == 0:\n                        print(metrics.get_stat_string())\n                        logging.info(\"TRAIN: metrics: {}\".format(metrics.get_stat_string()))\n                        if args.log == True:\n                            metrics.dump_log()\n\n                    print('[CHECK][Cache:%d][Total:%d]' %\n                          (len(train_loader.dataset.img_data_cache),\n                           len(train_loader.dataset.env_list)))\n                    logging.info('TRAIN: [CHECK][Cache:{}][Total:{}]'.format(\n                        len(train_loader.dataset.img_data_cache), len(train_loader.dataset.env_list)))\n\n                if all_envs_loaded == False:\n                    train_loader.dataset._load_envs(in_order=True)\n                    if len(train_loader.dataset.pruned_env_set) == 0:\n                        done = True\n                        if args.cache == False:\n                            train_loader.dataset._load_envs(\n                                start_idx=0, in_order=True)\n                else:\n                    done = True\n\n        epoch += 1\n\n        if epoch % args.save_every == 0:\n\n            model_state = get_state(model)\n            optimizer_state = optim.state_dict()\n\n            aad = dict(args.__dict__)\n            ad = {}\n            for i in aad:\n                if i[0] != '_':\n                    ad[i] = aad[i]\n\n            checkpoint = {'args': ad,\n                          'state': model_state,\n                          'epoch': epoch,\n                          'optimizer': optimizer_state}\n\n            checkpoint_path = '%s/epoch_%d_thread_%d.pt' % (\n                args.checkpoint_dir, epoch, rank)\n            print('Saving checkpoint to %s' % 
checkpoint_path)\n            logging.info(\"TRAIN: Saving checkpoint to {}\".format(checkpoint_path))\n            torch.save(checkpoint, checkpoint_path)\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    # data params\n    parser.add_argument('-train_h5', default='data/train.h5')\n    parser.add_argument('-val_h5', default='data/val.h5')\n    parser.add_argument('-test_h5', default='data/test.h5')\n    parser.add_argument('-data_json', default='data/data.json')\n    parser.add_argument('-vocab_json', default='data/vocab.json')\n\n    parser.add_argument(\n        '-target_obj_conn_map_dir',\n        default='data/target-obj-conn-maps/500')\n    parser.add_argument('-map_resolution', default=500, type=int)\n\n    parser.add_argument(\n        '-mode',\n        default='train+eval',\n        type=str,\n        choices=['train', 'eval', 'train+eval'])\n    parser.add_argument('-eval_split', default='val', type=str)\n\n    # model details\n    parser.add_argument(\n        '-model_type',\n        default='cnn',\n        choices=['cnn', 'cnn+q', 'lstm', 'lstm+q', 'lstm-mult+q', 'pacman'])\n    parser.add_argument('-max_episode_length', default=100, type=int)\n    parser.add_argument('-curriculum', default=0, type=int)\n\n    # optim params\n    parser.add_argument('-batch_size', default=20, type=int)\n    parser.add_argument('-learning_rate', default=1e-3, type=float)\n    parser.add_argument('-max_epochs', default=1000, type=int)\n    parser.add_argument('-overfit', default=False, action='store_true')\n\n    # bookkeeping\n    parser.add_argument('-print_every', default=5, type=int)\n    parser.add_argument('-eval_every', default=1, type=int)\n    parser.add_argument('-save_every', default=1000, type=int) #optional if you would like to save specific epochs as opposed to relying on the eval thread\n    parser.add_argument('-identifier', default='cnn')\n    parser.add_argument('-num_processes', default=1, type=int)\n    parser.add_argument('-max_threads_per_gpu', default=10, type=int)\n\n    # checkpointing\n    parser.add_argument('-checkpoint_path', default=False)\n    parser.add_argument('-checkpoint_dir', default='checkpoints/nav/')\n    parser.add_argument('-log_dir', default='logs/nav/')\n    parser.add_argument('-log', default=False, action='store_true')\n    parser.add_argument('-cache', default=False, action='store_true')\n    parser.add_argument('-max_controller_actions', type=int, default=5)\n    parser.add_argument('-max_actions', type=int)\n    args = parser.parse_args()\n\n    args.time_id = time.strftime(\"%m_%d_%H:%M\")\n\n    #MAX_CONTROLLER_ACTIONS = args.max_controller_actions\n\n    if not os.path.isdir(args.log_dir):\n        os.makedirs(args.log_dir)\n\n    if args.curriculum:\n        assert 'lstm' in args.model_type #TODO: Finish implementing curriculum for other model types\n\n    logging.basicConfig(filename=os.path.join(args.log_dir, \"run_{}.log\".format(\n                                                str(datetime.now()).replace(' ', '_'))),\n                        level=logging.INFO,\n                        format='%(asctime)-15s %(message)s')\n\n    try:\n        args.gpus = os.environ['CUDA_VISIBLE_DEVICES'].split(',')\n        args.gpus = [int(x) for x in args.gpus]\n    except KeyError:\n        print(\"CPU not supported\")\n        logging.info(\"CPU not supported\")\n        exit()\n\n    if args.checkpoint_path != False:\n\n        print('Loading checkpoint from %s' % args.checkpoint_path)\n        logging.info(\"Loading 
checkpoint from {}\".format(args.checkpoint_path))\n\n        args_to_keep = ['model_type']\n\n        checkpoint = torch.load(args.checkpoint_path, map_location={\n            'cuda:0': 'cpu'\n        })\n\n        for i in args.__dict__:\n            if i not in args_to_keep:\n                checkpoint['args'][i] = args.__dict__[i]\n\n        args = type('new_dict', (object, ), checkpoint['args'])\n\n    args.checkpoint_dir = os.path.join(args.checkpoint_dir,\n                                       args.time_id + '_' + args.identifier)\n    args.log_dir = os.path.join(args.log_dir,\n                                args.time_id + '_' + args.identifier)\n\n\n    # if set to overfit; set eval_split to train\n    if args.overfit == True:\n        args.eval_split = 'train'\n\n    print(args.__dict__)\n    logging.info(args.__dict__)\n\n    if not os.path.exists(args.checkpoint_dir):\n        os.makedirs(args.checkpoint_dir)\n        os.makedirs(args.log_dir)\n\n    if args.model_type == 'cnn':\n\n        model_kwargs = {}\n        shared_model = NavCnnModel(**model_kwargs)\n\n    elif args.model_type == 'cnn+q':\n\n        model_kwargs = {\n            'question_input': True,\n            'question_vocab': load_vocab(args.vocab_json)\n        }\n        shared_model = NavCnnModel(**model_kwargs)\n\n    elif args.model_type == 'lstm':\n\n        model_kwargs = {}\n        shared_model = NavCnnRnnModel(**model_kwargs)\n\n    elif args.model_type == 'lstm+q':\n\n        model_kwargs = {\n            'question_input': True,\n            'question_vocab': load_vocab(args.vocab_json)\n        }\n        shared_model = NavCnnRnnModel(**model_kwargs)\n\n    elif args.model_type == 'pacman':\n\n        model_kwargs = {'question_vocab': load_vocab(args.vocab_json)}\n        shared_model = NavPlannerControllerModel(**model_kwargs)\n\n    else:\n\n        exit()\n\n    shared_model.share_memory()\n\n    if args.checkpoint_path != False:\n        print('Loading params from checkpoint: %s' % args.checkpoint_path)\n        logging.info(\"Loading params from checkpoint: {}\".format(args.checkpoint_path))\n        shared_model.load_state_dict(checkpoint['state'])\n\n    if args.mode == 'eval':\n\n        eval(0, args, shared_model)\n\n    elif args.mode == 'train':\n\n        if args.num_processes > 1:\n            processes = []\n            for rank in range(0, args.num_processes):\n                # for rank in range(0, args.num_processes):\n                p = mp.Process(target=train, args=(rank, args, shared_model))\n                p.start()\n                processes.append(p)\n\n            for p in processes:\n                p.join()\n\n        else:\n            train(0, args, shared_model)\n\n    else:\n        processes = []\n\n        # Start the eval thread\n        p = mp.Process(target=eval, args=(0, args, shared_model))\n        p.start()\n        processes.append(p)\n\n        # Start the training thread(s)\n        for rank in range(1, args.num_processes + 1):\n            # for rank in range(0, args.num_processes):\n            p = mp.Process(target=train, args=(rank, args, shared_model))\n            p.start()\n            processes.append(p)\n\n        for p in processes:\n            p.join()\n"
  },
  {
    "path": "training/train_vqa.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates.\n# All rights reserved.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport h5py\nimport time\nimport argparse\nimport numpy as np\nimport os, sys, json\n\nimport torch\nfrom torch.autograd import Variable\ntorch.backends.cudnn.enabled = False\nimport torch.multiprocessing as mp\n\nfrom models import VqaLstmModel, VqaLstmCnnAttentionModel\nfrom data import EqaDataset, EqaDataLoader\nfrom metrics import VqaMetric\n\nfrom models import get_state, repackage_hidden, ensure_shared_grads\nfrom data import load_vocab\n\nimport pdb\n\n\ndef eval(rank, args, shared_model):\n\n    torch.cuda.set_device(args.gpus.index(args.gpus[rank % len(args.gpus)]))\n\n    if args.input_type == 'ques':\n\n        model_kwargs = {'vocab': load_vocab(args.vocab_json)}\n        model = VqaLstmModel(**model_kwargs)\n\n    elif args.input_type == 'ques,image':\n\n        model_kwargs = {'vocab': load_vocab(args.vocab_json)}\n        model = VqaLstmCnnAttentionModel(**model_kwargs)\n\n    lossFn = torch.nn.CrossEntropyLoss().cuda()\n\n    eval_loader_kwargs = {\n        'questions_h5': getattr(args, args.eval_split + '_h5'),\n        'data_json': args.data_json,\n        'vocab': args.vocab_json,\n        'batch_size': 1,\n        'input_type': args.input_type,\n        'num_frames': args.num_frames,\n        'split': args.eval_split,\n        'max_threads_per_gpu': args.max_threads_per_gpu,\n        'gpu_id': args.gpus[rank%len(args.gpus)],\n        'to_cache': args.cache\n    }\n\n    eval_loader = EqaDataLoader(**eval_loader_kwargs)\n    print('eval_loader has %d samples' % len(eval_loader.dataset))\n\n    args.output_log_path = os.path.join(args.log_dir,\n                                        'eval_' + str(rank) + '.json')\n\n    t, epoch, best_eval_acc = 0, 0, 0\n\n    while epoch < int(args.max_epochs):\n\n        model.load_state_dict(shared_model.state_dict())\n        model.eval()\n\n        metrics = VqaMetric(\n            info={'split': args.eval_split},\n            metric_names=[\n                'loss', 'accuracy', 'mean_rank', 'mean_reciprocal_rank'\n            ],\n            log_json=args.output_log_path)\n\n        if args.input_type == 'ques':\n            for batch in eval_loader:\n                t += 1\n\n                model.cuda()\n\n                idx, questions, answers = batch\n\n                questions_var = Variable(questions.cuda())\n                answers_var = Variable(answers.cuda())\n\n                scores = model(questions_var)\n                loss = lossFn(scores, answers_var)\n\n                # update metrics\n                accuracy, ranks = metrics.compute_ranks(\n                    scores.data.cpu(), answers)\n                metrics.update([loss.data[0], accuracy, ranks, 1.0 / ranks])\n\n            print(metrics.get_stat_string(mode=0))\n\n        elif args.input_type == 'ques,image':\n            done = False\n            all_envs_loaded = eval_loader.dataset._check_if_all_envs_loaded()\n\n            while done == False:\n                for batch in eval_loader:\n                    t += 1\n\n                    model.cuda()\n\n                    idx, questions, answers, images, _, _, _ = batch\n\n                    questions_var = Variable(questions.cuda())\n                    answers_var = Variable(answers.cuda())\n                    images_var = Variable(images.cuda())\n\n                    
scores, att_probs = model(images_var, questions_var)\n                    loss = lossFn(scores, answers_var)\n\n                    # update metrics\n                    accuracy, ranks = metrics.compute_ranks(\n                        scores.data.cpu(), answers)\n                    metrics.update(\n                        [loss.data[0], accuracy, ranks, 1.0 / ranks])\n\n                print(metrics.get_stat_string(mode=0))\n\n                if all_envs_loaded == False:\n                    eval_loader.dataset._load_envs()\n                    if len(eval_loader.dataset.pruned_env_set) == 0:\n                        done = True\n                else:\n                    done = True\n\n        epoch += 1\n\n        # checkpoint if best val accuracy\n        if metrics.metrics[1][0] > best_eval_acc:\n            best_eval_acc = metrics.metrics[1][0]\n            if epoch % args.eval_every == 0 and args.log == True:\n                metrics.dump_log()\n\n                model_state = get_state(model)\n\n                if args.checkpoint_path != False:\n                    ad = checkpoint['args']\n                else:\n                    ad = args.__dict__\n\n                checkpoint = {'args': ad, 'state': model_state, 'epoch': epoch}\n\n                checkpoint_path = '%s/epoch_%d_accuracy_%.04f.pt' % (\n                    args.checkpoint_dir, epoch, best_eval_acc)\n                print('Saving checkpoint to %s' % checkpoint_path)\n                torch.save(checkpoint, checkpoint_path)\n\n        print('[best_eval_accuracy:%.04f]' % best_eval_acc)\n\n\ndef train(rank, args, shared_model):\n\n    torch.cuda.set_device(args.gpus.index(args.gpus[rank % len(args.gpus)]))\n\n    if args.input_type == 'ques':\n\n        model_kwargs = {'vocab': load_vocab(args.vocab_json)}\n        model = VqaLstmModel(**model_kwargs)\n\n    elif args.input_type == 'ques,image':\n\n        model_kwargs = {'vocab': load_vocab(args.vocab_json)}\n        model = VqaLstmCnnAttentionModel(**model_kwargs)\n\n    lossFn = torch.nn.CrossEntropyLoss().cuda()\n\n    optim = torch.optim.Adam(\n        filter(lambda p: p.requires_grad, shared_model.parameters()),\n        lr=args.learning_rate)\n\n    train_loader_kwargs = {\n        'questions_h5': args.train_h5,\n        'data_json': args.data_json,\n        'vocab': args.vocab_json,\n        'batch_size': args.batch_size,\n        'input_type': args.input_type,\n        'num_frames': args.num_frames,\n        'split': 'train',\n        'max_threads_per_gpu': args.max_threads_per_gpu,\n        'gpu_id': args.gpus[rank%len(args.gpus)],\n        'to_cache': args.cache\n    }\n\n    args.output_log_path = os.path.join(args.log_dir,\n                                        'train_' + str(rank) + '.json')\n\n    metrics = VqaMetric(\n        info={'split': 'train',\n              'thread': rank},\n        metric_names=['loss', 'accuracy', 'mean_rank', 'mean_reciprocal_rank'],\n        log_json=args.output_log_path)\n\n    train_loader = EqaDataLoader(**train_loader_kwargs)\n    if args.input_type == 'ques,image':\n        train_loader.dataset._load_envs(start_idx=0, in_order=True)\n\n    print('train_loader has %d samples' % len(train_loader.dataset))\n\n    t, epoch = 0, 0\n\n    while epoch < int(args.max_epochs):\n\n        if args.input_type == 'ques':\n\n            for batch in train_loader:\n\n                t += 1\n\n                model.load_state_dict(shared_model.state_dict())\n                model.train()\n                model.cuda()\n\n             
   idx, questions, answers = batch\n\n                questions_var = Variable(questions.cuda())\n                answers_var = Variable(answers.cuda())\n\n                scores = model(questions_var)\n                loss = lossFn(scores, answers_var)\n\n                # zero grad\n                optim.zero_grad()\n\n                # update metrics\n                accuracy, ranks = metrics.compute_ranks(scores.data.cpu(), answers)\n                metrics.update([loss.data[0], accuracy, ranks, 1.0 / ranks])\n\n                # backprop and update\n                loss.backward()\n\n                ensure_shared_grads(model.cpu(), shared_model)\n                optim.step()\n\n                if t % args.print_every == 0:\n                    print(metrics.get_stat_string())\n                    if args.log == True:\n                        metrics.dump_log()\n\n        elif args.input_type == 'ques,image':\n\n            done = False\n            all_envs_loaded = train_loader.dataset._check_if_all_envs_loaded()\n\n            while done == False:\n\n                for batch in train_loader:\n\n                    t += 1\n\n                    model.load_state_dict(shared_model.state_dict())\n                    model.train()\n                    model.cnn.eval()\n                    model.cuda()\n\n                    idx, questions, answers, images, _, _, _ = batch\n\n                    questions_var = Variable(questions.cuda())\n                    answers_var = Variable(answers.cuda())\n                    images_var = Variable(images.cuda())\n\n                    scores, att_probs = model(images_var, questions_var)\n                    loss = lossFn(scores, answers_var)\n\n                    # zero grad\n                    optim.zero_grad()\n\n                    # update metrics\n                    accuracy, ranks = metrics.compute_ranks(scores.data.cpu(), answers)\n                    metrics.update([loss.data[0], accuracy, ranks, 1.0 / ranks])\n\n                    # backprop and update\n                    loss.backward()\n\n                    ensure_shared_grads(model.cpu(), shared_model)\n                    optim.step()\n\n                    if t % args.print_every == 0:\n                        print(metrics.get_stat_string())\n                        if args.log == True:\n                            metrics.dump_log()\n\n                if all_envs_loaded == False:\n                    print('[CHECK][Cache:%d][Total:%d]' % (len(train_loader.dataset.img_data_cache),\n                        len(train_loader.dataset.env_list)))\n                    train_loader.dataset._load_envs(in_order=True)\n                    if len(train_loader.dataset.pruned_env_set) == 0:\n                        done = True\n                else:\n                    done = True\n\n        epoch += 1\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    # data params\n    parser.add_argument('-train_h5', default='data/train.h5')\n    parser.add_argument('-val_h5', default='data/val.h5')\n    parser.add_argument('-test_h5', default='data/test.h5')\n    parser.add_argument('-data_json', default='data/data.json')\n    parser.add_argument('-vocab_json', default='data/vocab.json')\n\n    parser.add_argument('-train_cache_path', default=False)\n    parser.add_argument('-val_cache_path', default=False)\n\n    parser.add_argument('-mode', default='train', type=str, choices=['train','eval'])\n    parser.add_argument('-eval_split', default='val', type=str)\n\n    # model 
details\n    parser.add_argument(\n        '-input_type', default='ques,image', choices=['ques', 'ques,image'])\n    parser.add_argument(\n        '-num_frames', default=5,\n        type=int)  # -1 = all frames of navigation sequence\n\n    # optim params\n    parser.add_argument('-batch_size', default=20, type=int)\n    parser.add_argument('-learning_rate', default=3e-4, type=float)\n    parser.add_argument('-max_epochs', default=1000, type=int)\n\n    # bookkeeping\n    parser.add_argument('-print_every', default=50, type=int)\n    parser.add_argument('-eval_every', default=1, type=int)\n    parser.add_argument('-identifier', default='q-only')\n    parser.add_argument('-num_processes', default=1, type=int)\n    parser.add_argument('-max_threads_per_gpu', default=10, type=int)\n\n    # checkpointing\n    parser.add_argument('-checkpoint_path', default=False)\n    parser.add_argument('-checkpoint_dir', default='checkpoints/vqa/')\n    parser.add_argument('-log_dir', default='logs/vqa/')\n    parser.add_argument('-log', default=False, action='store_true')\n    parser.add_argument('-cache', default=False, action='store_true')\n    args = parser.parse_args()\n\n    args.time_id = time.strftime(\"%m_%d_%H:%M\")\n\n    try:\n        args.gpus = os.environ['CUDA_VISIBLE_DEVICES'].split(',')\n        args.gpus = [int(x) for x in args.gpus]\n    except KeyError:\n        print(\"CPU not supported\")\n        exit()\n\n    if args.checkpoint_path != False:\n        print('Loading checkpoint from %s' % args.checkpoint_path)\n\n        args_to_keep = ['input_type', 'num_frames']\n\n        checkpoint = torch.load(args.checkpoint_path, map_location={'cuda:0': 'cpu'})\n\n        for i in args.__dict__:\n            if i not in args_to_keep:\n                checkpoint['args'][i] = args.__dict__[i]\n\n        args = type('new_dict', (object, ), checkpoint['args'])\n\n    args.checkpoint_dir = os.path.join(args.checkpoint_dir,\n                                       args.time_id + '_' + args.identifier)\n    args.log_dir = os.path.join(args.log_dir,\n                                args.time_id + '_' + args.identifier)\n\n    print(args.__dict__)\n\n    if not os.path.exists(args.checkpoint_dir) and args.log == True:\n        os.makedirs(args.checkpoint_dir)\n        os.makedirs(args.log_dir)\n\n    if args.input_type == 'ques':\n\n        model_kwargs = {'vocab': load_vocab(args.vocab_json)}\n        shared_model = VqaLstmModel(**model_kwargs)\n\n    elif args.input_type == 'ques,image':\n\n        model_kwargs = {'vocab': load_vocab(args.vocab_json)}\n        shared_model = VqaLstmCnnAttentionModel(**model_kwargs)\n\n    if args.checkpoint_path != False:\n        print('Loading params from checkpoint: %s' % args.checkpoint_path)\n        shared_model.load_state_dict(checkpoint['state'])\n\n    shared_model.share_memory()\n\n    if args.mode == 'eval':\n\n        eval(0, args, shared_model)\n\n    else:\n\n        processes = []\n\n        # Start the eval thread\n        p = mp.Process(target=eval, args=(0, args, shared_model))\n        p.start()\n        processes.append(p)\n\n        # Start the training thread(s)\n        for rank in range(1, args.num_processes + 1):\n            p = mp.Process(target=train, args=(rank, args, shared_model))\n            p.start()\n            processes.append(p)\n\n        for p in processes:\n            p.join()\n"
  },
  {
    "path": "training/utils/preprocess_questions.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates.\n# All rights reserved.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree.\n# adapted from https://github.com/facebookresearch/clevr-iep/blob/master/iep/preprocess.py\n\nimport h5py\nimport argparse\nimport numpy as np\nfrom tqdm import tqdm\nimport os, sys, json, random\n\nimport pdb\n\"\"\"\nTokenize a sequence, converting a string seq into a list of (string) tokens by\nsplitting on the specified delimiter. Optionally add start and end tokens.\n\"\"\"\n\n\ndef tokenize(seq,\n             delim=' ',\n             punctToRemove=None,\n             addStartToken=True,\n             addEndToken=True):\n\n    if punctToRemove is not None:\n        for p in punctToRemove:\n            seq = str(seq).replace(p, '')\n\n    tokens = str(seq).split(delim)\n    if addStartToken:\n        tokens.insert(0, '<START>')\n\n    if addEndToken:\n        tokens.append('<END>')\n\n    return tokens\n\n\ndef buildVocab(sequences,\n               minTokenCount=1,\n               delim=' ',\n               punctToRemove=None,\n               addSpecialTok=False):\n    SPECIAL_TOKENS = {\n        '<NULL>': 0,\n        '<START>': 1,\n        '<END>': 2,\n        '<UNK>': 3,\n    }\n\n    tokenToCount = {}\n    for seq in sequences:\n        seqTokens = tokenize(\n            seq,\n            delim=delim,\n            punctToRemove=punctToRemove,\n            addStartToken=False,\n            addEndToken=False)\n        for token in seqTokens:\n            if token not in tokenToCount:\n                tokenToCount[token] = 0\n            tokenToCount[token] += 1\n\n    tokenToIdx = {}\n    if addSpecialTok == True:\n        for token, idx in SPECIAL_TOKENS.items():\n            tokenToIdx[token] = idx\n    for token, count in sorted(tokenToCount.items()):\n        if count >= minTokenCount:\n            tokenToIdx[token] = len(tokenToIdx)\n\n    return tokenToIdx\n\n\ndef encode(seqTokens, tokenToIdx, allowUnk=False):\n    seqIdx = []\n    for token in seqTokens:\n        if token not in tokenToIdx:\n            if allowUnk:\n                token = '<UNK>'\n            else:\n                raise KeyError('Token \"%s\" not in vocab' % token)\n        seqIdx.append(tokenToIdx[token])\n    return seqIdx\n\n\ndef decode(seqIdx, idxToToken, delim=None, stopAtEnd=True):\n    tokens = []\n    for idx in seqIdx:\n        tokens.append(idxToToken[idx])\n        if stopAtEnd and tokens[-1] == '<END>':\n            break\n    if delim is None:\n        return tokens\n    else:\n        return delim.join(tokens)\n\n\ndef preprocessImages(obj, render_dir=False):\n    working_dir = os.path.join(render_dir, 'working')\n    path_id = obj['path_id']\n    image_paths = []\n    for i in range(len(obj['pos_queue']) - 1):\n        image_paths.append('%s/%s_%05d.jpg' % (working_dir, path_id, i + 1))\n\n    image_frames = []\n    for i in image_paths:\n        if os.path.isfile(i) == False:\n            print(i)\n            return False\n        img = imread(i, mode='RGB')\n        img = imresize(img, (224, 224), interp='bicubic')\n        img = img.transpose(2, 0, 1)\n        img = img / 255.0\n        image_frames.append(img)\n        # TODO: mean subtraction\n\n    return image_frames\n\n\ndef processActions(actions):\n    # from shortest-path-gen format\n    # 0: forward\n    # 1: left\n    # 2: right\n    # 3: stop\n    #\n    # to\n    # 0: null\n    # 1: start\n    # 2: 
forward\n    # 3: left\n    # 4: right\n    # 5: stop\n    # for model training\n    action_translations = {0: 2, 1: 3, 2: 4, 3: 5}\n\n    action_ids = [1]\n\n    for i in actions:\n        action_ids.append(action_translations[i])\n    return action_ids\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('-input_json', required=True)\n    parser.add_argument('-input_vocab', default=None)\n    parser.add_argument('-output_train_h5', required=True)\n    parser.add_argument('-output_val_h5', required=True)\n    parser.add_argument('-output_test_h5', required=True)\n    parser.add_argument('-output_data_json', required=True)\n    parser.add_argument('-output_vocab', default=None)\n    parser.add_argument('-num_ques', default=10000000, type=int)\n    parser.add_argument('-shortest_path_dir', required=True, type=str)\n    args = parser.parse_args()\n\n    random.seed(123)\n    np.random.seed(123)\n\n    assert args.input_vocab != None or args.output_vocab != None, \"Either input or output vocab required\"\n\n    data = json.load(open(args.input_json, 'r'))\n\n    houses = data['questions']\n    questions = []\n\n    for h in tqdm(houses):\n        print(h, len(houses[h]))\n        for q in houses[h]:\n            if len(str(q['answer']).split(' ')) > 1:\n                q['answer'] = '_'.join(q['answer'].split(' '))\n            questions.append(q)\n\n    print('Total questions: ', len(questions))\n\n    # build vocab if no vocab file provided\n    if args.input_vocab == None:\n        answerTokenToIdx = buildVocab((str(q['answer']) for q in questions\n                                       if q['answer'] != 'NIL'))\n        questionTokenToIdx = buildVocab(\n            (q['question'] for q in questions if q['answer'] != 'NIL'),\n            punctToRemove=['?'],\n            addSpecialTok=True)\n\n        vocab = {\n            'questionTokenToIdx': questionTokenToIdx,\n            'answerTokenToIdx': answerTokenToIdx,\n        }\n    else:\n        vocab = json.load(open(args.input_vocab, 'r'))\n\n    if args.output_vocab != None:\n        json.dump(vocab, open(args.output_vocab, 'w'))\n\n    # encode questions\n    idx, encoded_questions, question_types, answers, action_labels, action_lengths, pos_queue, envs, boxes = [], [], [], [], [], [], [], [], []\n    for i, q in tqdm(enumerate(questions[:args.num_ques])):\n\n        if os.path.exists(\n                os.path.join(args.shortest_path_dir, q['house'] + '_' +\n                             str(q['id']) + '.json')) == False:\n            continue\n        nav = json.load(\n            open(\n                os.path.join(args.shortest_path_dir, q['house'] + '_' +\n                             str(q['id']) + '.json'), 'r'))\n\n        idx.append(q['id'])\n        questionTokens = tokenize(\n            q['question'], punctToRemove=['?'], addStartToken=False)\n        encoded_question = encode(questionTokens, vocab['questionTokenToIdx'])\n        encoded_questions.append(encoded_question)\n        question_types.append(q['type'])\n        answers.append(vocab['answerTokenToIdx'][str(q['answer'])])\n\n        # if there are 3 positions, there will be 2 actions + <stop>\n        actions = nav['actions']\n        positions = nav['positions']\n\n        action_labels.append(processActions(actions))\n        action_lengths.append(len(actions))\n\n        pos_queue.append(positions)\n        boxes.append(q['bbox'])\n\n        envs.append(q['house'])\n\n    args.num_ques = len(idx)\n    maxALength = 
max(action_lengths) + 1\n\n    action_labels_mat = np.zeros(\n        (len(questions[:args.num_ques]), maxALength), dtype=np.int16)\n    action_labels_mat.fill(0)  # 0 = null\n\n    for i in tqdm(range(len(questions[:args.num_ques]))):\n        for j in range(len(action_labels[i])):\n            action_labels_mat[i][j] = action_labels[i][j]\n\n    # pad encoded questions\n    maxQLength = max(len(x) for x in encoded_questions)\n    for qe in encoded_questions:\n        while len(qe) < maxQLength:\n            qe.append(vocab['questionTokenToIdx']['<NULL>'])\n\n    # make train/test splits\n    inds = list(range(0, len(idx)))\n    random.shuffle(inds)\n\n    train_envs = data['splits']['train']\n    val_envs = data['splits']['val']\n    test_envs = data['splits']['test']\n\n    assert any([x in train_envs for x in test_envs]) == False\n    assert any([x in train_envs for x in val_envs]) == False\n\n    train_inds = [i for i in inds if envs[i] in train_envs]\n    val_inds = [i for i in inds if envs[i] in val_envs]\n    test_inds = [i for i in inds if envs[i] in test_envs]\n\n    # TRAIN\n    train_idx = [idx[i] for i in train_inds]\n    train_encoded_questions = [encoded_questions[i] for i in train_inds]\n    train_question_types = [question_types[i] for i in train_inds]\n    train_answers = [answers[i] for i in train_inds]\n    train_envs = [envs[i] for i in train_inds]\n    train_pos_queue = [pos_queue[i] for i in train_inds]\n    train_boxes = [boxes[i] for i in train_inds]\n\n    train_action_labels = action_labels_mat[train_inds]\n    train_action_lengths = [action_lengths[i] for i in train_inds]\n\n    # VAL\n    val_idx = [idx[i] for i in val_inds]\n    val_encoded_questions = [encoded_questions[i] for i in val_inds]\n    val_question_types = [question_types[i] for i in val_inds]\n    val_answers = [answers[i] for i in val_inds]\n    val_envs = [envs[i] for i in val_inds]\n    val_pos_queue = [pos_queue[i] for i in val_inds]\n    val_boxes = [boxes[i] for i in val_inds]\n\n    val_action_labels = action_labels_mat[val_inds]\n    val_action_lengths = [action_lengths[i] for i in val_inds]\n\n    # TEST\n    test_idx = [idx[i] for i in test_inds]\n    test_encoded_questions = [encoded_questions[i] for i in test_inds]\n    test_question_types = [question_types[i] for i in test_inds]\n    test_answers = [answers[i] for i in test_inds]\n    test_envs = [envs[i] for i in test_inds]\n    test_pos_queue = [pos_queue[i] for i in test_inds]\n    test_boxes = [boxes[i] for i in test_inds]\n\n    test_action_labels = action_labels_mat[test_inds]\n    test_action_lengths = [action_lengths[i] for i in test_inds]\n\n    # parse envs\n    all_envs = list(set(envs))\n    train_env_idx = [all_envs.index(x) for x in train_envs]\n    val_env_idx = [all_envs.index(x) for x in val_envs]\n    test_env_idx = [all_envs.index(x) for x in test_envs]\n\n    # write h5 files\n    print('Writing hdf5')\n\n    train_encoded_questions = np.asarray(\n        train_encoded_questions, dtype=np.int16)\n    print('Train', train_encoded_questions.shape)\n    with h5py.File(args.output_train_h5, 'w') as f:\n        f.create_dataset('idx', data=np.asarray(train_idx))\n        f.create_dataset('questions', data=train_encoded_questions)\n        f.create_dataset('answers', data=np.asarray(train_answers))\n        f.create_dataset(\n            'action_labels',\n            data=np.asarray(train_action_labels),\n            dtype=np.int16)\n        f.create_dataset(\n            'action_lengths',\n            
data=np.asarray(train_action_lengths),\n            dtype=np.int16)\n\n    val_encoded_questions = np.asarray(val_encoded_questions, dtype=np.int16)\n    print('Val', val_encoded_questions.shape)\n    with h5py.File(args.output_val_h5, 'w') as f:\n        f.create_dataset('idx', data=np.asarray(val_idx))\n        f.create_dataset('questions', data=val_encoded_questions)\n        f.create_dataset('answers', data=np.asarray(val_answers))\n        f.create_dataset(\n            'action_labels',\n            data=np.asarray(val_action_labels),\n            dtype=np.int16)\n        f.create_dataset(\n            'action_lengths',\n            data=np.asarray(val_action_lengths),\n            dtype=np.int16)\n\n    test_encoded_questions = np.asarray(test_encoded_questions, dtype=np.int16)\n    print('Test', test_encoded_questions.shape)\n    with h5py.File(args.output_test_h5, 'w') as f:\n        f.create_dataset('idx', data=np.asarray(test_idx))\n        f.create_dataset('questions', data=test_encoded_questions)\n        f.create_dataset('answers', data=np.asarray(test_answers))\n        f.create_dataset(\n            'action_labels',\n            data=np.asarray(test_action_labels),\n            dtype=np.int16)\n        f.create_dataset(\n            'action_lengths',\n            data=np.asarray(test_action_lengths),\n            dtype=np.int16)\n\n    json.dump({\n        'envs': all_envs,\n        'train_env_idx': train_env_idx,\n        'val_env_idx': val_env_idx,\n        'test_env_idx': test_env_idx,\n        'train_pos_queue': train_pos_queue,\n        'val_pos_queue': val_pos_queue,\n        'test_pos_queue': test_pos_queue,\n        'train_boxes': train_boxes,\n        'val_boxes': val_boxes,\n        'test_boxes': test_boxes\n    }, open(args.output_data_json, 'w'))\n"
  },
  {
    "path": "training/utils/preprocess_questions_pkl.py",
    "content": "# adapted from https://github.com/facebookresearch/clevr-iep/blob/master/iep/preprocess.py\n\nimport h5py\nimport argparse\nimport numpy as np\n#from tqdm import tqdm\nimport os, sys, json, random\nimport pickle as pkl\n\nimport pdb\n\"\"\"\nTokenize a sequence, converting a string seq into a list of (string) tokens by\nsplitting on the specified delimiter. Optionally add start and end tokens.\n\"\"\"\n\n\ndef tokenize(seq,\n             delim=' ',\n             punctToRemove=None,\n             addStartToken=True,\n             addEndToken=True):\n\n    if punctToRemove is not None:\n        for p in punctToRemove:\n            seq = str(seq).replace(p, '')\n\n    tokens = str(seq).split(delim)\n    if addStartToken:\n        tokens.insert(0, '<START>')\n\n    if addEndToken:\n        tokens.append('<END>')\n\n    return tokens\n\n\ndef buildVocab(sequences,\n               minTokenCount=1,\n               delim=' ',\n               punctToRemove=None,\n               addSpecialTok=False):\n    SPECIAL_TOKENS = {\n        '<NULL>': 0,\n        '<START>': 1,\n        '<END>': 2,\n        '<UNK>': 3,\n    }\n\n    tokenToCount = {}\n    for seq in sequences:\n        seqTokens = tokenize(\n            seq,\n            delim=delim,\n            punctToRemove=punctToRemove,\n            addStartToken=False,\n            addEndToken=False)\n        for token in seqTokens:\n            if token not in tokenToCount:\n                tokenToCount[token] = 0\n            tokenToCount[token] += 1\n\n    tokenToIdx = {}\n    if addSpecialTok == True:\n        for token, idx in SPECIAL_TOKENS.items():\n            tokenToIdx[token] = idx\n    for token, count in sorted(tokenToCount.items()):\n        if count >= minTokenCount:\n            tokenToIdx[token] = len(tokenToIdx)\n\n    return tokenToIdx\n\n\ndef encode(seqTokens, tokenToIdx, allowUnk=False):\n    seqIdx = []\n    for token in seqTokens:\n        if token not in tokenToIdx:\n            if allowUnk:\n                token = '<UNK>'\n            else:\n                raise KeyError('Token \"%s\" not in vocab' % token)\n        seqIdx.append(tokenToIdx[token])\n    return seqIdx\n\n\ndef decode(seqIdx, idxToToken, delim=None, stopAtEnd=True):\n    tokens = []\n    for idx in seqIdx:\n        tokens.append(idxToToken[idx])\n        if stopAtEnd and tokens[-1] == '<END>':\n            break\n    if delim is None:\n        return tokens\n    else:\n        return delim.join(tokens)\n\n\ndef preprocessImages(obj, render_dir=False):\n    working_dir = os.path.join(render_dir, 'working')\n    path_id = obj['path_id']\n    image_paths = []\n    for i in range(len(obj['pos_queue']) - 1):\n        image_paths.append('%s/%s_%05d.jpg' % (working_dir, path_id, i + 1))\n\n    image_frames = []\n    for i in image_paths:\n        if os.path.isfile(i) == False:\n            print(i)\n            return False\n        img = imread(i, mode='RGB')\n        img = imresize(img, (224, 224), interp='bicubic')\n        img = img.transpose(2, 0, 1)\n        img = img / 255.0\n        image_frames.append(img)\n        # TODO: mean subtraction\n\n    return image_frames\n\n\ndef processActions(actions):\n    # from shortest-path-gen format\n    # 0: forward\n    # 1: left\n    # 2: right\n    # 3: stop\n    #\n    # to\n    # 0: null\n    # 1: start\n    # 2: forward\n    # 3: left\n    # 4: right\n    # 5: stop\n    # for model training\n    action_translations = {0: 2, 1: 3, 2: 4, 3: 5}\n\n    action_ids = [1]\n\n    for i in actions:\n        
action_ids.append(action_translations[i])\n    return action_ids\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('-input_json', required=True)\n    parser.add_argument('-input_vocab', default=None)\n    parser.add_argument('-output_train_h5', required=True)\n    parser.add_argument('-output_val_h5', required=True)\n    parser.add_argument('-output_test_h5', required=True)\n    parser.add_argument('-output_data_json', required=True)\n    parser.add_argument('-output_vocab', default=None)\n    parser.add_argument('-num_ques', default=10000000, type=int)\n    parser.add_argument('-shortest_path_dir', required=True, type=str)\n    args = parser.parse_args()\n\n    random.seed(123)\n    np.random.seed(123)\n\n    assert args.input_vocab != None or args.output_vocab != None, \"Either input or output vocab required\"\n\n    data = json.load(open(args.input_json, 'r'))\n\n    houses = data['questions']\n    questions = []\n\n    for h in houses:\n        print(h, len(houses[h]))\n        for q in houses[h]:\n            if len(str(q['answer']).split(' ')) > 1:\n                q['answer'] = '_'.join(q['answer'].split(' '))\n            questions.append(q)\n\n    print('Total questions: ', len(questions))\n\n    # build vocab if no vocab file provided\n    if args.input_vocab == None:\n        answerTokenToIdx = buildVocab((str(q['answer']) for q in questions\n                                       if q['answer'] != 'NIL'))\n        questionTokenToIdx = buildVocab(\n            (q['question'] for q in questions if q['answer'] != 'NIL'),\n            punctToRemove=['?'],\n            addSpecialTok=True)\n\n        vocab = {\n            'questionTokenToIdx': questionTokenToIdx,\n            'answerTokenToIdx': answerTokenToIdx,\n        }\n    else:\n        vocab = json.load(open(args.input_vocab, 'r'))\n\n    if args.output_vocab != None:\n        json.dump(vocab, open(args.output_vocab, 'w'))\n\n    # encode questions\n    idx, encoded_questions, question_types, answers, action_labels, action_lengths, pos_queue, envs, boxes = [], [], [], [], [], [], [], [], []\n    for i, q in enumerate(questions[:args.num_ques]):\n\n        if os.path.exists(\n                os.path.join(args.shortest_path_dir, q['house'] + '_' +\n                             str(q['id']) + '.pkl')) == False:\n            continue\n        \n        try:\n            nav = pkl.load(\n                open(\n                    os.path.join(args.shortest_path_dir, q['house'] + '_' +\n                                 str(q['id']) + '.pkl'), 'rb'))\n        except:\n            continue \n\n        idx.append(q['id'])\n        questionTokens = tokenize(\n            q['question'], punctToRemove=['?'], addStartToken=False)\n        encoded_question = encode(questionTokens, vocab['questionTokenToIdx'])\n        encoded_questions.append(encoded_question)\n        question_types.append(q['type'])\n        answers.append(vocab['answerTokenToIdx'][str(q['answer'])])\n\n        # if there are 3 positions, there will be 2 actions + <stop>\n        actions = nav['actions']\n        positions = nav['positions']\n\n        action_labels.append(processActions(actions))\n        action_lengths.append(len(actions))\n\n        pos_queue.append(positions)\n        boxes.append(q['bbox'])\n\n        envs.append(q['house'])\n\n        assert q['question'] == nav['question']\n\n    args.num_ques = len(idx)\n    maxALength = max(action_lengths) + 1\n\n    action_labels_mat = np.zeros(\n        
(len(questions[:args.num_ques]), maxALength), dtype=np.int16)\n    action_labels_mat.fill(0)  # 0 = null\n\n    for i in range(len(questions[:args.num_ques])):\n        for j in range(len(action_labels[i])):\n            action_labels_mat[i][j] = action_labels[i][j]\n\n    # pad encoded questions\n    maxQLength = max(len(x) for x in encoded_questions)\n    for qe in encoded_questions:\n        while len(qe) < maxQLength:\n            qe.append(vocab['questionTokenToIdx']['<NULL>'])\n\n    # make train/test splits\n    inds = list(range(0, len(idx)))\n    random.shuffle(inds)\n\n    train_envs = data['splits']['train']\n    val_envs = data['splits']['val']\n    test_envs = data['splits']['test']\n\n    assert any([x in train_envs for x in test_envs]) == False\n    assert any([x in train_envs for x in val_envs]) == False\n\n    train_inds = [i for i in inds if envs[i] in train_envs]\n    val_inds = [i for i in inds if envs[i] in val_envs]\n    test_inds = [i for i in inds if envs[i] in test_envs]\n\n    # TRAIN\n    train_idx = [idx[i] for i in train_inds]\n    train_encoded_questions = [encoded_questions[i] for i in train_inds]\n    train_question_types = [question_types[i] for i in train_inds]\n    train_answers = [answers[i] for i in train_inds]\n    train_envs = [envs[i] for i in train_inds]\n    train_pos_queue = [pos_queue[i] for i in train_inds]\n    train_boxes = [boxes[i] for i in train_inds]\n\n    train_action_labels = action_labels_mat[train_inds]\n    train_action_lengths = [action_lengths[i] for i in train_inds]\n\n    # VAL\n    val_idx = [idx[i] for i in val_inds]\n    val_encoded_questions = [encoded_questions[i] for i in val_inds]\n    val_question_types = [question_types[i] for i in val_inds]\n    val_answers = [answers[i] for i in val_inds]\n    val_envs = [envs[i] for i in val_inds]\n    val_pos_queue = [pos_queue[i] for i in val_inds]\n    val_boxes = [boxes[i] for i in val_inds]\n\n    val_action_labels = action_labels_mat[val_inds]\n    val_action_lengths = [action_lengths[i] for i in val_inds]\n\n    # TEST\n    test_idx = [idx[i] for i in test_inds]\n    test_encoded_questions = [encoded_questions[i] for i in test_inds]\n    test_question_types = [question_types[i] for i in test_inds]\n    test_answers = [answers[i] for i in test_inds]\n    test_envs = [envs[i] for i in test_inds]\n    test_pos_queue = [pos_queue[i] for i in test_inds]\n    test_boxes = [boxes[i] for i in test_inds]\n\n    test_action_labels = action_labels_mat[test_inds]\n    test_action_lengths = [action_lengths[i] for i in test_inds]\n\n    # parse envs\n    all_envs = list(set(envs))\n    train_env_idx = [all_envs.index(x) for x in train_envs]\n    val_env_idx = [all_envs.index(x) for x in val_envs]\n    test_env_idx = [all_envs.index(x) for x in test_envs]\n\n    # write h5 files\n    print('Writing hdf5')\n\n    train_encoded_questions = np.asarray(\n        train_encoded_questions, dtype=np.int16)\n    print('Train', train_encoded_questions.shape)\n    with h5py.File(args.output_train_h5, 'w') as f:\n        f.create_dataset('idx', data=np.asarray(train_idx))\n        f.create_dataset('questions', data=train_encoded_questions)\n        f.create_dataset('answers', data=np.asarray(train_answers))\n        f.create_dataset(\n            'action_labels',\n            data=np.asarray(train_action_labels),\n            dtype=np.int16)\n        f.create_dataset(\n            'action_lengths',\n            data=np.asarray(train_action_lengths),\n            dtype=np.int16)\n\n    
val_encoded_questions = np.asarray(val_encoded_questions, dtype=np.int16)\n    print('Val', val_encoded_questions.shape)\n    with h5py.File(args.output_val_h5, 'w') as f:\n        f.create_dataset('idx', data=np.asarray(val_idx))\n        f.create_dataset('questions', data=val_encoded_questions)\n        f.create_dataset('answers', data=np.asarray(val_answers))\n        f.create_dataset(\n            'action_labels',\n            data=np.asarray(val_action_labels),\n            dtype=np.int16)\n        f.create_dataset(\n            'action_lengths',\n            data=np.asarray(val_action_lengths),\n            dtype=np.int16)\n\n    test_encoded_questions = np.asarray(test_encoded_questions, dtype=np.int16)\n    print('Test', test_encoded_questions.shape)\n    with h5py.File(args.output_test_h5, 'w') as f:\n        f.create_dataset('idx', data=np.asarray(test_idx))\n        f.create_dataset('questions', data=test_encoded_questions)\n        f.create_dataset('answers', data=np.asarray(test_answers))\n        f.create_dataset(\n            'action_labels',\n            data=np.asarray(test_action_labels),\n            dtype=np.int16)\n        f.create_dataset(\n            'action_lengths',\n            data=np.asarray(test_action_lengths),\n            dtype=np.int16)\n\n    json.dump({\n        'envs': all_envs,\n        'train_env_idx': train_env_idx,\n        'val_env_idx': val_env_idx,\n        'test_env_idx': test_env_idx,\n        'train_pos_queue': train_pos_queue,\n        'val_pos_queue': val_pos_queue,\n        'test_pos_queue': test_pos_queue,\n        'train_boxes': train_boxes,\n        'val_boxes': val_boxes,\n        'test_boxes': test_boxes\n    }, open(args.output_data_json, 'w'))\n"
  },
  {
    "path": "utils/house3d.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates.\n# All rights reserved.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport cv2\nimport csv\nimport copy\nimport os, sys\nimport itertools\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom House3D.objrender import Vec3\n\nimport pdb\n\n\nclass House3DUtils():\n    def __init__(\n            self,\n            env,\n            rotation_sensitivity=9,\n            move_sensitivity=0.5,\n            build_graph=False,\n            graph_dir='/path/to/3d-graphs',\n            target_obj_conn_map_dir='/path/to/target_obj_connmaps',\n            debug=True,\n            load_semantic_classes=True,\n            collision_reward=0.0,\n            success_reward=1.0,\n            dist_reward_scale=0.005,\n            seeing_rwd=False):\n        self.env = env\n        self.debug = debug\n\n        self.rotation_sensitivity = rotation_sensitivity\n        self.move_sensitivity = move_sensitivity\n\n        self.angles = [x for x in range(-180, 180, self.rotation_sensitivity)]\n        self.angle_strings = {1: 'right', -1: 'left'}\n\n        self.dirs, self.angle_map = self.calibrate_steps(reset=True)\n        self.move_multiplier = self.move_sensitivity / np.array([np.abs(x).sum() for x in self.dirs]).mean()\n\n        self.graph_dir = graph_dir\n        self.graph = None\n\n        self.target_obj_conn_map_dir = target_obj_conn_map_dir\n\n        if build_graph == True:\n            if os.path.exists(\n                    os.path.join(graph_dir,\n                                 self.env.house.house['id'] + '.pkl')):\n                self.load_graph(\n                    os.path.join(graph_dir,\n                                 self.env.house.house['id'] + '.pkl'))\n            else:\n                self.build_graph(\n                    save_path=os.path.join(\n                        graph_dir, self.env.house.house['id'] + '.pkl'))\n\n        self.rooms, self.objects = self._parse()\n\n        self.collision_reward = collision_reward\n        self.success_reward = success_reward\n        self.dist_reward_scale = dist_reward_scale\n        self.seeing_rwd = seeing_rwd\n\n        if load_semantic_classes == True:\n            self._load_semantic_classes()\n\n    # Shortest paths are computed in 1000 x 1000 grid coordinates.\n    # One step in the SUNCG continuous coordinate system however, can be\n    # multiple grids in the grid coordinate system (since turns aren't 90 deg).\n    # So even though the grid shortest path is fine-grained,\n    # an equivalent best-fit path in SUNCG continuous coordinates\n    # has to be computed by simulating steps. 
Sucks, but yeah.\n    #\n    # For now, we first explicitly calibrate how many steps in the gridworld\n    # correspond to one step in continuous world, across all directions\n    def calibrate_steps(self, reset=True):\n        mults, angle_map = [], {}\n\n        cx, cy = self.env.house.to_coor(50, 50)\n        if reset == True:\n            self.env.reset(x=cx, y=cy)\n\n        for i in range(len(self.angles)):\n            yaw = self.angles[i]\n\n            self.env.cam.yaw = yaw\n            self.env.cam.updateDirection()\n\n            x1, y1 = self.env.house.to_grid(self.env.cam.pos.x,\n                                            self.env.cam.pos.z)\n\n            pos = self.env.cam.pos\n            pos = pos + self.env.cam.front * self.move_sensitivity\n\n            x2, y2 = self.env.house.to_grid(pos.x, pos.z)\n\n            mult = np.array([x2, y2]) - np.array([x1, y1])\n            mult = (mult[0], mult[1])\n\n            angle_map[mult] = yaw\n            mults.append(mult)\n\n        return mults, angle_map\n\n    # 0: forward\n    # 1: left\n    # 2: right\n    # 3: stop\n    #\n    # returns observation, reward, done, info\n    def step(self, action, step_reward=False):\n        if action not in [0, 1, 2, 3]:\n            raise IndexError\n\n        if step_reward == True:\n            pos = self.env.cam.pos\n            x1, y1 = self.env.house.to_grid(self.env.cam.pos.x, self.env.cam.pos.z)\n            init_target_dist = self.env.house.connMap[x1, y1]\n\n        reward = 0\n        done = False\n\n        if action == 0:\n            mv = self.env.move_forward(\n                dist_fwd=self.move_sensitivity, dist_hor=0)\n            obs = self.env.render()\n            if mv == False:  # collision\n                reward -= self.collision_reward\n            elif mv != False and step_reward == True:\n                # evaluate connMap dist here\n                x2, y2 = self.env.house.to_grid(self.env.cam.pos.x,\n                                                self.env.cam.pos.z)\n                final_target_dist = self.env.house.connMap[x2, y2]\n                reward += self.dist_reward_scale * ((init_target_dist - final_target_dist) / np.abs(\n                    self.dirs[self.angles.index(self.env.cam.yaw % 180)]).sum())\n\n        elif action == 1:\n            self.env.rotate(-self.rotation_sensitivity)\n            obs = self.env.render()\n\n        elif action == 2:\n            self.env.rotate(self.rotation_sensitivity)\n            obs = self.env.render()\n\n        elif action == 3:\n            done = True\n            obs = self.env.render()\n\n        return obs, reward, done\n\n    # pos: [x, y, z, yaw], or objrender.Vec3\n    def get_dist_to_target(self, pos):\n        if isinstance(pos, Vec3) == True:\n            x, y = self.env.house.to_grid(pos.x, pos.z)\n        else:\n            x, y = self.env.house.to_grid(pos[0], pos[2])\n        dist = self.env.house.connMap[x, y]\n        return self.move_multiplier * dist\n\n    def is_inside_room(self, pos, room):\n        if isinstance(pos, Vec3) == True:\n            x = pos.x\n            y = pos.z\n        else:\n            x = pos[0]\n            y = pos[2]\n        if x >= room['bbox']['min'][0] and x <= room['bbox']['max'][0] and \\\n            y >= room['bbox']['min'][2] and y <= room['bbox']['max'][2]:\n            return True\n        return False\n\n    # takes 200-300 seconds(!) 
when rotation_sensitivity == 9\n    def build_graph(self, save_path=None):\n        import time\n        start_time = time.time()\n\n        collide_res = self.env.house.n_row\n\n        from dijkstar import Graph\n\n        visit = dict()\n        self.graph = Graph()\n\n        self.mock_obs_map = np.zeros(\n            (collide_res + 1, collide_res + 1), dtype=np.uint8)\n        self.mock_obs_map[np.where(self.env.house.connMap == -1)] = 1\n\n        for x in range(collide_res + 1):\n            for y in range(collide_res + 1):\n                pos = (x, y)\n                if self.env.house.canMove(x, y) and pos not in visit:\n                    que = [pos]\n                    visit[pos] = True\n                    ptr = 0\n                    while ptr < len(que):\n                        cx, cy = que[ptr]\n                        ptr += 1\n\n                        # add all angles for (cx, cy) here\n                        # connect first and last\n                        for ang in range(len(self.angles) - 1):\n                            self.graph.add_edge((cx, cy, self.angles[ang]),\n                                                (cx, cy, self.angles[ang + 1]),\n                                                {\n                                                    'cost': 1\n                                                })\n                            self.graph.add_edge((cx, cy, self.angles[ang + 1]),\n                                                (cx, cy, self.angles[ang]), {\n                                                    'cost': 1\n                                                })\n                        self.graph.add_edge((cx, cy, self.angles[-1]),\n                                            (cx, cy, self.angles[0]), {\n                                                'cost': 1\n                                            })\n                        self.graph.add_edge((cx, cy, self.angles[0]),\n                                            (cx, cy, self.angles[-1]), {\n                                                'cost': 1\n                                            })\n\n                        for deti in range(len(self.dirs)):\n                            det = self.dirs[deti]\n                            tx, ty = cx + det[0], cy + det[1]\n                            if (self.env.house.inside(tx, ty) and\n                                    self.mock_obs_map[min(cx, tx):max(cx, tx)+1,\n                                                      min(cy, ty):max(cy, ty)+1].sum() == 0):\n                                # make changes here to add edges for angle increments as well\n                                #\n                                # cost = 1 from one angle to the next,\n                                # and connect first and last\n                                # this would be for different angles for same tx, ty\n                                #\n                                # then there would be connections for same angle\n                                # and from (cx, cy) to (tx, ty)\n                                self.graph.add_edge(\n                                    (cx, cy, self.angle_map[self.dirs[deti]]),\n                                    (tx, ty, self.angle_map[self.dirs[deti]]),\n                                    {\n                                        'cost': 1\n                                    })\n                                tp = (tx, ty)\n                                if tp not in visit:\n                   
                 visit[tp] = True\n                                    que.append(tp)\n\n        if self.debug == True:\n            print(\"--- %s seconds to build the graph ---\" %\n                  (time.time() - start_time))\n\n        if save_path != None:\n            start_time = time.time()\n\n            print(\"saving graph to %s\" % (save_path))\n            self.graph.dump(save_path)\n\n            if self.debug == True:\n                print(\"--- %s seconds to save the graph ---\" %\n                      (time.time() - start_time))\n\n    def load_graph(self, path):\n        import time\n        start_time = time.time()\n\n        from dijkstar import Graph\n\n        self.graph = Graph()\n        self.graph.load(path)\n\n        if self.debug == True:\n            print(\"--- %s seconds to load the graph ---\" %\n                  (time.time() - start_time))\n\n    # takes 1-5 seconds when rotation_sensitivity == 9\n    def compute_shortest_path(self, source, target, graph=None):\n        from dijkstar import find_path\n\n        if graph == None:\n            if self.graph == None:\n                if os.path.exists(\n                        os.path.join(self.graph_dir,\n                                     self.env.house.house['id'] + '.pkl')):\n                    self.load_graph(\n                        os.path.join(self.graph_dir,\n                                     self.env.house.house['id'] + '.pkl'))\n                else:\n                    self.build_graph(\n                        save_path=os.path.join(\n                            graph_dir, self.env.house.house['id'] + '.pkl'))\n            graph = self.graph\n\n        cost_func = lambda u, v, e, prev_e: e['cost']\n        shortest_path = find_path(graph, source, target, cost_func=cost_func)\n\n        return shortest_path\n\n    def fit_grid_path_to_suncg(self, nodes, init_yaw=None, back_skip=2):\n\n        # don't mess with the originals\n        nodes = copy.deepcopy(nodes)\n\n        # set initial position\n        x, y = self.env.house.to_coor(nodes[0][0], nodes[0][1], True)\n        x, y = x.astype(np.float32).item(), y.astype(np.float32).item()\n\n        self.env.cam.pos.x, self.env.cam.pos.y, self.env.cam.pos.z = x, self.env.house.robotHei, y\n        if init_yaw == None:\n            self.env.cam.yaw = np.random.choice(self.angles)\n        else:\n            self.env.cam.yaw = init_yaw\n        self.env.cam.updateDirection()\n\n        pos_queue, action_queue = [], []\n\n        current_pos = self._vec_to_array(self.env.cam.pos, self.env.cam.yaw)\n        pos_queue = pos_queue + [current_pos]\n\n        ptr = 0\n\n        while ptr < len(nodes) - 1:\n            turned = False\n\n            # target rotation\n            target_yaw = self.angle_map[tuple(\n                np.array(nodes[ptr]) - np.array(nodes[ptr + 1]))]\n\n            # turn\n            if target_yaw != current_pos[3]:\n                p_q, a_q = self.get_rotate_steps(current_pos, target_yaw)\n\n                pos_queue = pos_queue + p_q\n                action_queue = action_queue + a_q\n\n                self.env.cam.yaw = target_yaw\n                self.env.cam.updateDirection()\n\n                turned = True\n                current_pos = self._vec_to_array(self.env.cam.pos,\n                                                 self.env.cam.yaw)\n\n            # move\n            cx, cz = self.env.house.to_coor(nodes[ptr + 1][0],\n                                            nodes[ptr + 1][1], True)\n\n            # if 
collision, find another sub-path, and delete that edge\n            if self.env.move(cx, cz) == False:\n                if nodes[ptr + 1] in self.graph[nodes[ptr]]:\n                    del self.graph[nodes[ptr]][nodes[ptr + 1]]\n                    print('deleted', nodes[ptr], nodes[ptr + 1])\n\n                # delete the turns\n                if turned == True:\n                    pos_queue = pos_queue[:-len(p_q)]\n                    action_queue = action_queue[:-len(a_q)]\n\n                if back_skip != 0:\n                    pos_queue = pos_queue[:-back_skip]\n                    action_queue = action_queue[:-back_skip]\n\n                dest_ptr = ptr + 1\n                ptr = ptr - back_skip\n\n                sub_shortest_path = self.compute_shortest_path(\n                    nodes[ptr], nodes[dest_ptr])\n                nodes = nodes[:ptr] + sub_shortest_path.nodes + nodes[dest_ptr\n                                                                      + 1:]\n\n                current_pos = pos_queue[-1]\n            else:\n                # this is the new position the agent moved to\n                current_pos = self._vec_to_array(self.env.cam.pos,\n                                                 self.env.cam.yaw)\n\n                assert current_pos[3] == pos_queue[-1][3] and (\n                    current_pos[0] != pos_queue[-1][0]\n                    or current_pos[2] != pos_queue[-1][2])\n\n                pos_queue = pos_queue + [current_pos]\n                action_queue = action_queue + ['fwd']\n\n                ptr = ptr + 1\n\n        action_queue.append('stop')\n\n        return pos_queue, action_queue\n\n    # pos contains [x, y, z, yaw]\n    # given a position and target yaw, this function\n    # computes actions needed to turn there\n    def get_rotate_steps(self, pos, target_yaw):\n\n        direction = np.random.choice([1, -1])\n\n        cur_yaw = pos[-1]\n        ptr = self.angles.index(cur_yaw)\n        pos_queue, action_queue = [], []\n\n        while cur_yaw != target_yaw:\n            if len(pos_queue) == len(self.angles) // 2:\n                # reset\n                direction = direction * -1\n                cur_yaw = pos[-1]\n                ptr = self.angles.index(cur_yaw)\n                pos_queue, action_queue = [], []\n\n            ptr = (ptr + direction) % len(self.angles)\n            cur_yaw = self.angles[ptr]\n\n            pos_queue.append([pos[0], pos[1], pos[2], self.angles[ptr]])\n            action_queue.append(self.angle_strings[direction])\n\n        return pos_queue, action_queue\n\n    def _vec_to_array(self, pos, yaw):\n        return [pos.x, pos.y, pos.z, yaw]\n\n    # render images from camera position queue\n    def render_images_from_pos_queue(self,\n                                     pos_queue=[],\n                                     img_dir='tmp/images',\n                                     actions=None,\n                                     values=None,\n                                     rewards=None):\n        if len(pos_queue) == 0:\n            return False\n\n        action_map = {0: 'FRWD', 1: 'LEFT', 2: 'RGHT', 3: 'STOP'}\n\n        import scipy.misc\n\n        sgx, sgy = self.env.house.to_grid(pos_queue[0][0], pos_queue[0][2])\n        tgx, tgy = self.env.house.to_grid(pos_queue[-1][0], pos_queue[-1][2])\n\n        for i in range(len(pos_queue)):\n            # set position\n            p = pos_queue[i]\n            self.env.reset(x=p[0], y=p[2], yaw=p[3])\n\n            # save image\n            
image = np.array(self.env.render(), copy=False)\n\n            # put some text\n            text = \"[%02d]\" % (i + 1)\n\n            if actions != None and i < len(actions):\n                text += \"[%s]\" % action_map[actions[i]]\n\n            if values != None and i < len(values):\n                text += \"[V%.03f]\" % values[i]\n\n            if rewards != None and i > 0 and i <= len(rewards):\n                text += \"[R%.03f]\" % rewards[i - 1]\n\n            image = cv2.putText(\n                img=np.copy(image),\n                text=text,\n                org=(20, 30),\n                fontFace=3,\n                fontScale=0.4,\n                color=(255, 255, 255),\n                thickness=1)\n\n            scipy.misc.toimage(image).save(\n                '%s/%s_%04d_%04d_%04d_%04d_%05d_%05d.jpg' %\n                (img_dir, self.env.house.house['id'], sgx, sgy, tgx, tgy,\n                 i + 1, len(pos_queue)))\n\n        return True\n\n    # render video from camera position queue\n    #\n    # NOTE: call `render_images_from_pos_queue` before calling this\n    def render_video_from_pos_queue(self,\n                                    pos_queue=[],\n                                    img_dir='tmp/images',\n                                    vid_dir='tmp/videos',\n                                    fps=[5],\n                                    tag_name='piano'):\n        if len(pos_queue) == 0:\n            return False\n\n        import subprocess\n\n        sgx, sgy = self.env.house.to_grid(pos_queue[0][0], pos_queue[0][2])\n        tgx, tgy = self.env.house.to_grid(pos_queue[-1][0], pos_queue[-1][2])\n\n        for fp in fps:\n            subprocess.Popen([\n                '/srv/share/abhshkdz/local/bin/ffmpeg', '-f', 'image2', '-r',\n                str(fp), '-i',\n                '%s/%s_%04d_%04d_%04d_%04d' %\n                (img_dir, self.env.house.house['id'], sgx, sgy, tgx, tgy) +\n                '_%05d_' + '%05d.jpg' % (len(pos_queue)), '-vcodec', 'libx264',\n                '-crf', '25', '-y',\n                '%s/%s_%04d_%04d_%s_%04d_%04d_%d.mp4' %\n                (vid_dir, self.env.house.house['id'], sgx, sgy, tag_name, tgx,\n                 tgy, fp)\n            ])\n\n            if self.debug == True:\n                print('Rendered video to ' +\n                      '%s/%s_%04d_%04d_%s_%04d_%04d_%d.mp4' %\n                      (vid_dir, self.env.house.house['id'], sgx, sgy, tag_name,\n                       tgx, tgy, fp))\n\n        return True\n\n    # Go over all nodes of house environment and accumulate objects room-wise.\n    def _parse(self, levelsToExplore=[0]):\n        rooms, objects = [], {}\n        data = self.env.house.house\n\n        modelCategoryMapping = {}\n\n        import csv\n        csvFile = csv.reader(open(self.env.house.metaDataFile, 'r'))\n        headers = next(csvFile)\n\n        for row in csvFile:\n            modelCategoryMapping[row[headers.index('model_id')]] = {\n                headers[x]: row[x]\n                for x in range(2, len(headers))  # 0 is index, 1 is model_id\n            }\n\n        for i in levelsToExplore:\n            for j in range(len(data['levels'][i]['nodes'])):\n                assert data['levels'][i]['nodes'][j]['type'] != 'Box'\n\n                if 'valid' in data['levels'][i]['nodes'][j]:\n                    assert data['levels'][i]['nodes'][j]['valid'] == 1\n\n                # Rooms\n                if data['levels'][i]['nodes'][j]['type'] == 'Room':\n                    if 
'roomTypes' not in data['levels'][i]['nodes'][j]:\n                        continue\n\n                    # Can rooms have more than one type?\n                    # Yes, they can; just found ['Living_Room', 'Dining_Room', 'Kitchen']\n                    # assert len(data['levels'][i]['nodes'][j]['roomTypes']) <= 3\n\n                    roomType = [\n                        # ' '.join(x.lower().split('_'))\n                        x.lower()\n                        for x in data['levels'][i]['nodes'][j]['roomTypes']\n                    ]\n\n                    nodes = data['levels'][i]['nodes'][j][\n                        'nodeIndices'] if 'nodeIndices' in data['levels'][i][\n                            'nodes'][j] else []\n                    rooms.append({\n                        'type':\n                        roomType,\n                        'bbox':\n                        data['levels'][i]['nodes'][j]['bbox'],\n                        'nodes':\n                        nodes,\n                        'model_id':\n                        data['levels'][i]['nodes'][j]['modelId']\n                    })\n\n                # Objects\n                elif data['levels'][i]['nodes'][j]['type'] == 'Object':\n                    if 'materials' not in data['levels'][i]['nodes'][j]:\n                        material = []\n                    else:\n                        material = data['levels'][i]['nodes'][j]['materials']\n                    objects[data['levels'][i]['nodes'][j]['id']] = {\n                        'id':\n                        data['levels'][i]['nodes'][j]['id'],\n                        'model_id':\n                        data['levels'][i]['nodes'][j]['modelId'],\n                        'fine_class':\n                        modelCategoryMapping[data['levels'][i]['nodes'][j][\n                            'modelId']]['fine_grained_class'],\n                        'coarse_class':\n                        modelCategoryMapping[data['levels'][i]['nodes'][j][\n                            'modelId']]['coarse_grained_class'],\n                        'bbox':\n                        data['levels'][i]['nodes'][j]['bbox'],\n                        'mat':\n                        material\n                    }\n\n        return rooms, objects\n\n    # Spawn at a randomly selected point in a particular room\n    def spawn_room(self, room=None):\n        if room == None:\n            return False, None\n\n        target_room = '_'.join(room.lower().split(' '))\n\n        if self.env.house.hasRoomType(target_room) == False:\n            return False, None\n\n        rooms = self.env.house._getRooms(target_room)\n        room = np.random.choice(rooms)\n\n        gx1, gy1, gx2, gy2 = self.env.house._getRoomBounds(room)\n\n        available_coords = []\n        for x in range(gx1, gx2 + 1):\n            for y in range(gy1, gy2 + 1):\n                if self.env.house.moveMap[x, y] > 0:\n                    available_coords.append((x, y))\n\n        # print(available_coords)\n        spawn_coord_idx = np.random.choice(len(available_coords))\n        spawn_coord = available_coords[spawn_coord_idx]\n\n        return spawn_coord, room\n\n    # Spawn close to an object\n    # If room given, look for object within room\n    def spawn_object(self, obj=None, room=None):\n        if object == None:\n            return False, None\n\n        if isinstance(obj, list) == False:\n            obj = [obj]\n\n        is_door = False\n        if 'door' in obj:\n            is_door = True\n\n   
     target_obj = ['_'.join(x.lower().split(' ')) for x in obj]\n\n        if room != None:\n            if 'nodeIndices' in room:\n                objs = [\n                    self.objects['0_' + str(x)] for x in room['nodeIndices']\n                    if self.objects['0_' + str(x)]['fine_class'] in target_obj\n                ]\n            else:\n                objs = [\n                    self.objects['0_' + str(x)] for x in room['nodes']\n                    if self.objects['0_' + str(x)]['fine_class'] in target_obj\n                ]\n        else:\n            obj_id_list = list(\n                itertools.chain.from_iterable(\n                    [x['nodes'] for x in self.rooms if x['type'] != []]))\n            objs = [\n                self.objects['0_' + str(x)] for x in obj_id_list\n                if self.objects['0_' + str(x)]['fine_class'] in target_obj\n            ]\n\n        if len(objs) == 0:\n            return False, None, None\n\n        obj_idx = np.random.choice(len(objs))\n        obj = objs[obj_idx]\n\n        self.target_obj_class = obj['fine_class'].lower()\n\n        gx1, gy1, gx2, gy2 = self.env.house._getRoomBounds(obj)\n\n        if room == None:\n            obj_node_idx = int(obj['id'][2:])\n            room = [\n                x for x in self.env.house.all_rooms\n                if 'nodeIndices' in x and obj_node_idx in x['nodeIndices']\n            ][0]\n\n        self.set_target_object(obj, room)\n\n        available_x, available_y = np.where(self.env.house.connMap == 0)\n\n        if len(available_x) == 0:\n            return False, None, None\n\n        spawn_coords = []\n        for i in range(len(available_x)):\n            spawn_coords.append((available_x[i], available_y[i]))\n\n        return spawn_coords, obj, room\n\n    # analogous to `setTargetRoom` in the House3D API\n    def set_target_object(self, obj, room):\n        object_tp = room['id'] + '_' + obj['id'] + '_' + obj['fine_class'].lower(\n        )\n        # Caching\n        if object_tp in self.env.house.connMapDict:\n            self.env.house.connMap, self.env.house.connectedCoors, self.env.house.inroomDist, self.env.house.maxConnDist = self.env.house.connMapDict[\n                object_tp]\n            return True  # object changed!\n        elif os.path.exists(\n                os.path.join(\n                    self.target_obj_conn_map_dir,\n                    self.env.house.house['id'] + '_' + object_tp + '.npy')):\n            self.env.house.connMap = np.load(\n                os.path.join(\n                    self.target_obj_conn_map_dir,\n                    self.env.house.house['id'] + '_' + object_tp + '.npy'))\n\n            if self.env.house.connMap.shape[0] == self.env.house.n_row+1:\n                self.env.house.connectedCoors, self.env.house.inroomDist, self.env.house.maxConnDist = None, None, None\n                return True\n\n        self.env.house.connMap = connMap = np.ones(\n            (self.env.house.n_row + 1, self.env.house.n_row + 1),\n            dtype=np.int32) * -1\n        self.env.house.inroomDist = inroomDist = np.ones(\n            (self.env.house.n_row + 1, self.env.house.n_row + 1),\n            dtype=np.float32) * -1\n        dirs = [[0, 1], [1, 0], [-1, 0], [0, -1]]\n        que = []\n        flag_find_open_components = True\n\n        _ox1, _, _oy1 = obj['bbox']['min']\n        _ox2, _, _oy2 = obj['bbox']['max']\n        ocx, ocy = (_ox1 + _ox2) / 2, (_oy1 + _oy2) / 2\n        ox1, oy1, ox2, oy2 = self.env.house.rescale(_ox1, _oy1, 
_ox2, _oy2)\n\n        for _ in range(2):\n            _x1, _, _y1 = room['bbox']['min']\n            _x2, _, _y2 = room['bbox']['max']\n            cx, cy = (_x1 + _x2) / 2, (_y1 + _y2) / 2\n            x1, y1, x2, y2 = self.env.house.rescale(_x1, _y1, _x2, _y2)\n\n            curr_components = self.env.house._find_components(\n                x1,\n                y1,\n                x2,\n                y2,\n                dirs=dirs,\n                return_open=flag_find_open_components\n            )  # find all the open components\n            if len(curr_components) == 0:\n                print('No space found! =(')\n                raise ValueError('no space')\n            if isinstance(curr_components[0],\n                          list):  # join all the coors in the open components\n                curr_major_coors = list(itertools.chain(*curr_components))\n            else:\n                curr_major_coors = curr_components\n            min_dist_to_center, min_dist_to_edge = 1e50, 1e50\n            for x, y in curr_major_coors:\n                ###\n                # Compute minimum dist to edge here\n                if x in range(ox1, ox2):\n                    dx = 0\n                elif x < ox1:\n                    dx = ox1 - x\n                else:\n                    dx = x - ox2\n\n                if y in range(oy1, oy2):\n                    dy = 0\n                elif y < oy1:\n                    dy = oy1 - y\n                else:\n                    dy = y - oy2\n\n                assert dx >= 0 and dy >= 0\n\n                if dx != 0 or dy != 0:\n                    dd = np.sqrt(dx**2 + dy**2)\n                elif dx == 0:\n                    dd = dy\n                else:\n                    dd = dx\n\n                if dd < min_dist_to_edge:\n                    min_dist_to_edge = int(np.ceil(dd))\n                ###\n                tx, ty = self.env.house.to_coor(x, y)\n                tdist = np.sqrt((tx - ocx)**2 + (ty - ocy)**2)\n                if tdist < min_dist_to_center:\n                    min_dist_to_center = tdist\n                inroomDist[x, y] = tdist\n            margin = min_dist_to_edge + 1\n            for x, y in curr_major_coors:\n                inroomDist[x, y] -= min_dist_to_center\n            for x, y in curr_major_coors:\n                if x in range(ox1 - margin, ox2 + margin) and y in range(\n                        oy1 - margin, oy2 + margin):\n                    connMap[x, y] = 0\n                    que.append((x, y))\n            if len(que) > 0: break\n            if flag_find_open_components:\n                flag_find_open_components = False\n            else:\n                break\n            raise ValueError\n\n        ptr = 0\n        self.env.house.maxConnDist = 1\n        while ptr < len(que):\n            x, y = que[ptr]\n            cur_dist = connMap[x, y]\n            ptr += 1\n            for dx, dy in dirs:\n                tx, ty = x + dx, y + dy\n                if self.env.house.inside(tx, ty) and self.env.house.canMove(\n                        tx, ty) and not self.env.house.isConnect(tx, ty):\n                    que.append((tx, ty))\n                    connMap[tx, ty] = cur_dist + 1\n                    if cur_dist + 1 > self.env.house.maxConnDist:\n                        self.env.house.maxConnDist = cur_dist + 1\n        self.env.house.connMapDict[object_tp] = (connMap, que, inroomDist,\n                                                 self.env.house.maxConnDist)\n        np.save(\n       
     os.path.join(\n                self.target_obj_conn_map_dir,\n                self.env.house.house['id'] + '_' + object_tp + '.npy'),\n            connMap)\n        self.connectedCoors = que\n        print(' >>>> ConnMap Cached!')\n        return True  # room changed!\n\n    def _load_semantic_classes(self, color_file=None):\n        if color_file == None:\n            color_file = self.env.config['colorFile']\n\n        self.semantic_classes = {}\n\n        with open(color_file) as csv_file:\n            reader = csv.DictReader(csv_file)\n            for row in reader:\n                c = np.array((row['r'], row['g'], row['b']), dtype=np.uint8)\n                fine_cat = row['name'].lower()\n                self.semantic_classes[fine_cat] = c\n\n        return self.semantic_classes\n\n    def _get_best_yaw_obj_from_pos(self, obj_id, grid_pos, height=1.0):\n        obj = self.objects[obj_id]\n        obj_fine_class = obj['fine_class']\n\n        cx, cy = self.env.house.to_coor(grid_pos[0], grid_pos[1])\n\n        self.env.cam.pos.x = cx\n        self.env.cam.pos.y = height\n        self.env.cam.pos.z = cy\n\n        best_yaw, best_coverage = None, 0\n\n        for yaw in self.angles:\n            self.env.cam.yaw = yaw\n            self.env.cam.updateDirection()\n\n            seg = self.env.render(mode='semantic')\n            c = self.semantic_classes[obj_fine_class.lower()]\n            mask = np.all(seg == c, axis=2)\n            coverage = np.sum(mask) / (seg.shape[0] * seg.shape[1])\n\n            if best_yaw == None:\n                best_yaw = yaw\n                best_coverage = coverage\n            else:\n                if coverage > best_coverage:\n                    best_yaw = yaw\n                    best_coverage = coverage\n\n        return best_yaw, best_coverage\n\n    def _get_best_view_obj(self,\n                           obj,\n                           coverage_thres=0.5,\n                           dist_add=0.5,\n                           robot_height=False):\n        bbox = obj['bbox']\n        obj_fine_class = obj['fine_class']\n\n        obj_max = np.asarray(bbox['max'])\n        obj_min = np.asarray(bbox['min'])\n        obj_center = (obj_min + obj_max) / 2\n\n        c_x, c_y, c_z = obj_center\n        max_radius = np.sqrt(\n            (obj_max[0] - obj_min[0]) * (obj_max[0] - obj_min[0]) +\n            (obj_max[2] - obj_min[2]) * (obj_max[2] - obj_min[2])) / 2.0\n        max_radius += dist_add\n\n        best_pos = None\n        best_coverage = 0\n\n        returned_pos_cov = []\n\n        for yaw in self.angles:\n            pos = [\n                c_x - max_radius * np.cos(yaw * (2 * np.pi) / 360.0), c_y,\n                c_z - max_radius * np.sin(yaw * (2 * np.pi) / 360.0), yaw\n            ]\n\n            if robot_height == True:\n                pos[1] = min(max(0.75, c_y), 2.00)\n\n            self.env.cam.pos.x = pos[0]\n            self.env.cam.pos.y = pos[1]\n            self.env.cam.pos.z = pos[2]\n            self.env.cam.yaw = pos[3]\n\n            self.env.cam.updateDirection()\n\n            seg = self.env.render(mode='semantic')\n            c = self.semantic_classes[obj_fine_class.lower()]\n            mask = np.all(seg == c, axis=2)\n            coverage = np.sum(mask) / (seg.shape[0] * seg.shape[1])\n\n            returned_pos_cov.append([pos, coverage])\n\n            if coverage > coverage_thres:\n                return pos, coverage, returned_pos_cov\n            elif coverage > best_coverage:\n                best_coverage = 
coverage\n                best_pos = pos\n\n        return best_pos, best_coverage, returned_pos_cov"
  },
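`utils/house3d.py` wraps a House3D environment with discrete actions (0: forward, 1: left, 2: right, 3: stop), grid-graph shortest paths, and target-object connectivity maps. Below is a minimal usage sketch, assuming a House3D `Environment` named `env` has already been constructed for some house; that setup, the directory paths, and the `'piano'` object class are illustrative assumptions, not this repo's actual training or evaluation pipeline.

```python
# Sketch: typical House3DUtils workflow, under the assumptions stated above.
import numpy as np
from utils.house3d import House3DUtils

h3d = House3DUtils(
    env,                                          # a pre-built House3D Environment (assumed)
    build_graph=True,                             # load <house_id>.pkl if cached, else build it
    graph_dir='data/3d-graphs',                   # placeholder paths
    target_obj_conn_map_dir='data/target-obj-conn-maps')

# Spawn near a named object; this also sets the target connMap that
# get_dist_to_target (and step's distance-based reward) reads from.
spawn_coords, obj, room = h3d.spawn_object(obj='piano')
gx, gy = spawn_coords[np.random.choice(len(spawn_coords))]
cx, cy = h3d.env.house.to_coor(gx, gy, True)
h3d.env.reset(x=cx, y=cy)

# Actions: 0 = forward, 1 = turn left, 2 = turn right, 3 = stop.
for action in [0, 0, 2, 0, 3]:
    obs, reward, done = h3d.step(action)
    print(done, h3d.get_dist_to_target(h3d.env.cam.pos))
    if done:
        break
```

Note that `spawn_object` caches the object's connectivity map to `target_obj_conn_map_dir`, so later runs in the same house load it from disk instead of recomputing it.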
  {
    "path": "utils/make_houses.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates.\n# All rights reserved.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport json\nimport argparse\nimport subprocess\nimport shlex\nimport os\nimport multiprocessing\n\nparser = argparse.ArgumentParser(\n    description='Create obj+mtl files for the houses in the dataset.')\nparser.add_argument('-eqa_path', help='/path/to/eqa.json', required=True)\nparser.add_argument(\n    '-suncg_toolbox_path', help='/path/to/SUNCGtoolbox', required=True)\nparser.add_argument(\n    '-suncg_data_path', help='/path/to/suncg/data_root', required=True)\nparser.add_argument(\n    '-num_processes',\n    help='number of threads to use',\n    default=multiprocessing.cpu_count())\nargs = parser.parse_args()\n\neqa_data = json.load(open(args.eqa_path, 'r'))\nhouses = list(eqa_data['questions'].keys())\nstart_dir = os.getcwd()\n\n\ndef extract_threaded(house):\n    os.chdir(os.path.join(args.suncg_data_path, 'house', house))\n    subprocess.call(\n        shlex.split('%s house.json house.obj' % (os.path.join(\n            args.suncg_toolbox_path, 'gaps', 'bin', 'x86_64', 'scn2scn'), )))\n    print('extracted', house)\n\n\npool = multiprocessing.Pool(args.num_processes)\npool.map(extract_threaded, houses)"
  }
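`make_houses.py` walks every house id referenced in `eqa.json` and runs SUNCGtoolbox's `scn2scn` in parallel to produce `house.obj`/`house.mtl` next to each `house.json`. A hedged sanity-check sketch for after the script has run; both paths below are placeholders.

```python
# Sketch: verify that scn2scn produced an .obj for every house in eqa.json.
# Both paths are placeholders, not from the repo.
import json
import os

eqa_path = '/path/to/eqa.json'
suncg_data_path = '/path/to/suncg/data_root'

houses = list(json.load(open(eqa_path, 'r'))['questions'].keys())
missing = [h for h in houses
           if not os.path.exists(
               os.path.join(suncg_data_path, 'house', h, 'house.obj'))]
print('%d / %d houses missing house.obj' % (len(missing), len(houses)))
```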
]