[
  {
    "path": ".idea/.gitignore",
    "content": "# Default ignored files\n/shelf/\n/workspace.xml\n# Datasource local storage ignored files\n/dataSources/\n/dataSources.local.xml\n# Editor-based HTTP Client requests\n/httpRequests/\n"
  },
  {
    "path": ".idea/GNN_biomarker_MEDIA.iml",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<module type=\"PYTHON_MODULE\" version=\"4\">\n  <component name=\"NewModuleRootManager\">\n    <content url=\"file://$MODULE_DIR$\" />\n    <orderEntry type=\"jdk\" jdkName=\"li-cancer Remote Python 3.8.5 (sftp://xiaoxiaol@localhost:6000/data/xiaoxiaol/anaconda3/envs/cancergnn/bin/python)\" jdkType=\"Python SDK\" />\n    <orderEntry type=\"sourceFolder\" forTests=\"false\" />\n  </component>\n</module>"
  },
  {
    "path": ".idea/deployment.xml",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<project version=\"4\">\n  <component name=\"PublishConfigData\" serverName=\"li-gan\">\n    <serverData>\n      <paths name=\"ipag\">\n        <serverdata>\n          <mappings>\n            <mapping deploy=\"/data/xiaoxiaol/ipag/GNN_HBM/\" local=\"$PROJECT_DIR$/\" web=\"/\" />\n          </mappings>\n        </serverdata>\n      </paths>\n      <paths name=\"li-gan\">\n        <serverdata>\n          <mappings>\n            <mapping deploy=\"/data/xiaoxiaol/ipag/GNN_HBM\" local=\"$PROJECT_DIR$\" web=\"/\" />\n          </mappings>\n        </serverdata>\n      </paths>\n      <paths name=\"xiaoxiaol@localhost:6000 password\">\n        <serverdata>\n          <mappings>\n            <mapping deploy=\"/data/xiaoxiaol/ipag/GNN_HBM\" local=\"$PROJECT_DIR$\" web=\"/\" />\n          </mappings>\n        </serverdata>\n      </paths>\n    </serverData>\n  </component>\n</project>"
  },
  {
    "path": ".idea/encodings.xml",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<project version=\"4\">\n  <component name=\"Encoding\" addBOMForNewFiles=\"with NO BOM\" />\n</project>"
  },
  {
    "path": ".idea/inspectionProfiles/Project_Default.xml",
    "content": "<component name=\"InspectionProjectProfileManager\">\n  <profile version=\"1.0\">\n    <option name=\"myName\" value=\"Project Default\" />\n    <inspection_tool class=\"PyPackageRequirementsInspection\" enabled=\"true\" level=\"WARNING\" enabled_by_default=\"true\">\n      <option name=\"ignoredPackages\">\n        <value>\n          <list size=\"200\">\n            <item index=\"0\" class=\"java.lang.String\" itemvalue=\"gensim\" />\n            <item index=\"1\" class=\"java.lang.String\" itemvalue=\"torch-scatter\" />\n            <item index=\"2\" class=\"java.lang.String\" itemvalue=\"unity-scope-colourlovers\" />\n            <item index=\"3\" class=\"java.lang.String\" itemvalue=\"scikit-learn\" />\n            <item index=\"4\" class=\"java.lang.String\" itemvalue=\"testpath\" />\n            <item index=\"5\" class=\"java.lang.String\" itemvalue=\"ufw\" />\n            <item index=\"6\" class=\"java.lang.String\" itemvalue=\"py\" />\n            <item index=\"7\" class=\"java.lang.String\" itemvalue=\"torchvision\" />\n            <item index=\"8\" class=\"java.lang.String\" itemvalue=\"catfish\" />\n            <item index=\"9\" class=\"java.lang.String\" itemvalue=\"ipython-genutils\" />\n            <item index=\"10\" class=\"java.lang.String\" itemvalue=\"bz2file\" />\n            <item index=\"11\" class=\"java.lang.String\" itemvalue=\"python-louvain\" />\n            <item index=\"12\" class=\"java.lang.String\" itemvalue=\"bleach\" />\n            <item index=\"13\" class=\"java.lang.String\" itemvalue=\"graphviz\" />\n            <item index=\"14\" class=\"java.lang.String\" itemvalue=\"lxml\" />\n            <item index=\"15\" class=\"java.lang.String\" itemvalue=\"language-selector\" />\n            <item index=\"16\" class=\"java.lang.String\" itemvalue=\"jsonschema\" />\n            <item index=\"17\" class=\"java.lang.String\" itemvalue=\"xlrd\" />\n            <item index=\"18\" class=\"java.lang.String\" 
itemvalue=\"Werkzeug\" />\n            <item index=\"19\" class=\"java.lang.String\" itemvalue=\"wordcloud\" />\n            <item index=\"20\" class=\"java.lang.String\" itemvalue=\"python-apt\" />\n            <item index=\"21\" class=\"java.lang.String\" itemvalue=\"click\" />\n            <item index=\"22\" class=\"java.lang.String\" itemvalue=\"pyxdg\" />\n            <item index=\"23\" class=\"java.lang.String\" itemvalue=\"tensorboard\" />\n            <item index=\"24\" class=\"java.lang.String\" itemvalue=\"imageio\" />\n            <item index=\"25\" class=\"java.lang.String\" itemvalue=\"matplotlib\" />\n            <item index=\"26\" class=\"java.lang.String\" itemvalue=\"Keras\" />\n            <item index=\"27\" class=\"java.lang.String\" itemvalue=\"Mako\" />\n            <item index=\"28\" class=\"java.lang.String\" itemvalue=\"idna\" />\n            <item index=\"29\" class=\"java.lang.String\" itemvalue=\"colorgram.py\" />\n            <item index=\"30\" class=\"java.lang.String\" itemvalue=\"networkx\" />\n            <item index=\"31\" class=\"java.lang.String\" itemvalue=\"pycurl\" />\n            <item index=\"32\" class=\"java.lang.String\" itemvalue=\"pluggy\" />\n            <item index=\"33\" class=\"java.lang.String\" itemvalue=\"torch-sparse\" />\n            <item index=\"34\" class=\"java.lang.String\" itemvalue=\"unity-scope-manpages\" />\n            <item index=\"35\" class=\"java.lang.String\" itemvalue=\"screen-resolution-extra\" />\n            <item index=\"36\" class=\"java.lang.String\" itemvalue=\"jupyter\" />\n            <item index=\"37\" class=\"java.lang.String\" itemvalue=\"PyWavelets\" />\n            <item index=\"38\" class=\"java.lang.String\" itemvalue=\"sessioninstaller\" />\n            <item index=\"39\" class=\"java.lang.String\" itemvalue=\"smart-open\" />\n            <item index=\"40\" class=\"java.lang.String\" itemvalue=\"prompt-toolkit\" />\n            <item index=\"41\" class=\"java.lang.String\" 
itemvalue=\"rcssmin\" />\n            <item index=\"42\" class=\"java.lang.String\" itemvalue=\"tensorflow-tensorboard\" />\n            <item index=\"43\" class=\"java.lang.String\" itemvalue=\"astor\" />\n            <item index=\"44\" class=\"java.lang.String\" itemvalue=\"pathlib2\" />\n            <item index=\"45\" class=\"java.lang.String\" itemvalue=\"unity-scope-devhelp\" />\n            <item index=\"46\" class=\"java.lang.String\" itemvalue=\"pytest-runner\" />\n            <item index=\"47\" class=\"java.lang.String\" itemvalue=\"unity-scope-tomboy\" />\n            <item index=\"48\" class=\"java.lang.String\" itemvalue=\"olefile\" />\n            <item index=\"49\" class=\"java.lang.String\" itemvalue=\"pytz\" />\n            <item index=\"50\" class=\"java.lang.String\" itemvalue=\"python-systemd\" />\n            <item index=\"51\" class=\"java.lang.String\" itemvalue=\"traitlets\" />\n            <item index=\"52\" class=\"java.lang.String\" itemvalue=\"absl-py\" />\n            <item index=\"53\" class=\"java.lang.String\" itemvalue=\"protobuf\" />\n            <item index=\"54\" class=\"java.lang.String\" itemvalue=\"joblib\" />\n            <item index=\"55\" class=\"java.lang.String\" itemvalue=\"lib\" />\n            <item index=\"56\" class=\"java.lang.String\" itemvalue=\"nltk\" />\n            <item index=\"57\" class=\"java.lang.String\" itemvalue=\"atomicwrites\" />\n            <item index=\"58\" class=\"java.lang.String\" itemvalue=\"pycups\" />\n            <item index=\"59\" class=\"java.lang.String\" itemvalue=\"unity-scope-zotero\" />\n            <item index=\"60\" class=\"java.lang.String\" itemvalue=\"gast\" />\n            <item index=\"61\" class=\"java.lang.String\" itemvalue=\"unity-scope-yelp\" />\n            <item index=\"62\" class=\"java.lang.String\" itemvalue=\"pyzmq\" />\n            <item index=\"63\" class=\"java.lang.String\" itemvalue=\"oauthlib\" />\n            <item index=\"64\" class=\"java.lang.String\" 
itemvalue=\"entrypoints\" />\n            <item index=\"65\" class=\"java.lang.String\" itemvalue=\"tensorflow-gpu\" />\n            <item index=\"66\" class=\"java.lang.String\" itemvalue=\"beautifulsoup4\" />\n            <item index=\"67\" class=\"java.lang.String\" itemvalue=\"argcomplete\" />\n            <item index=\"68\" class=\"java.lang.String\" itemvalue=\"cryptography\" />\n            <item index=\"69\" class=\"java.lang.String\" itemvalue=\"Theano\" />\n            <item index=\"70\" class=\"java.lang.String\" itemvalue=\"keras-vis\" />\n            <item index=\"71\" class=\"java.lang.String\" itemvalue=\"mugshot\" />\n            <item index=\"72\" class=\"java.lang.String\" itemvalue=\"widgetsnbextension\" />\n            <item index=\"73\" class=\"java.lang.String\" itemvalue=\"tensorly\" />\n            <item index=\"74\" class=\"java.lang.String\" itemvalue=\"numexpr\" />\n            <item index=\"75\" class=\"java.lang.String\" itemvalue=\"distro\" />\n            <item index=\"76\" class=\"java.lang.String\" itemvalue=\"defer\" />\n            <item index=\"77\" class=\"java.lang.String\" itemvalue=\"jupyter-core\" />\n            <item index=\"78\" class=\"java.lang.String\" itemvalue=\"pydot\" />\n            <item index=\"79\" class=\"java.lang.String\" itemvalue=\"menulibre\" />\n            <item index=\"80\" class=\"java.lang.String\" itemvalue=\"httplib2\" />\n            <item index=\"81\" class=\"java.lang.String\" itemvalue=\"wcwidth\" />\n            <item index=\"82\" class=\"java.lang.String\" itemvalue=\"apturl\" />\n            <item index=\"83\" class=\"java.lang.String\" itemvalue=\"Jinja2\" />\n            <item index=\"84\" class=\"java.lang.String\" itemvalue=\"Keras-Preprocessing\" />\n            <item index=\"85\" class=\"java.lang.String\" itemvalue=\"pytest-cov\" />\n            <item index=\"86\" class=\"java.lang.String\" itemvalue=\"torch-geometric\" />\n            <item index=\"87\" class=\"java.lang.String\" 
itemvalue=\"coverage\" />\n            <item index=\"88\" class=\"java.lang.String\" itemvalue=\"six\" />\n            <item index=\"89\" class=\"java.lang.String\" itemvalue=\"plainbox\" />\n            <item index=\"90\" class=\"java.lang.String\" itemvalue=\"system-service\" />\n            <item index=\"91\" class=\"java.lang.String\" itemvalue=\"parso\" />\n            <item index=\"92\" class=\"java.lang.String\" itemvalue=\"ipython\" />\n            <item index=\"93\" class=\"java.lang.String\" itemvalue=\"chardet\" />\n            <item index=\"94\" class=\"java.lang.String\" itemvalue=\"face-recognition-models\" />\n            <item index=\"95\" class=\"java.lang.String\" itemvalue=\"command-not-found\" />\n            <item index=\"96\" class=\"java.lang.String\" itemvalue=\"tabulate\" />\n            <item index=\"97\" class=\"java.lang.String\" itemvalue=\"PyYAML\" />\n            <item index=\"98\" class=\"java.lang.String\" itemvalue=\"pickleshare\" />\n            <item index=\"99\" class=\"java.lang.String\" itemvalue=\"SimpleCV\" />\n            <item index=\"100\" class=\"java.lang.String\" itemvalue=\"tables\" />\n            <item index=\"101\" class=\"java.lang.String\" itemvalue=\"Pygments\" />\n            <item index=\"102\" class=\"java.lang.String\" itemvalue=\"imutils\" />\n            <item index=\"103\" class=\"java.lang.String\" itemvalue=\"qtconsole\" />\n            <item index=\"104\" class=\"java.lang.String\" itemvalue=\"terminado\" />\n            <item index=\"105\" class=\"java.lang.String\" itemvalue=\"python-igraph\" />\n            <item index=\"106\" class=\"java.lang.String\" itemvalue=\"plyfile\" />\n            <item index=\"107\" class=\"java.lang.String\" itemvalue=\"torch-cluster\" />\n            <item index=\"108\" class=\"java.lang.String\" itemvalue=\"reportlab\" />\n            <item index=\"109\" class=\"java.lang.String\" itemvalue=\"jupyter-client\" />\n            <item index=\"110\" 
class=\"java.lang.String\" itemvalue=\"pexpect\" />\n            <item index=\"111\" class=\"java.lang.String\" itemvalue=\"ipykernel\" />\n            <item index=\"112\" class=\"java.lang.String\" itemvalue=\"nbconvert\" />\n            <item index=\"113\" class=\"java.lang.String\" itemvalue=\"attrs\" />\n            <item index=\"114\" class=\"java.lang.String\" itemvalue=\"psutil\" />\n            <item index=\"115\" class=\"java.lang.String\" itemvalue=\"svgwrite\" />\n            <item index=\"116\" class=\"java.lang.String\" itemvalue=\"jedi\" />\n            <item index=\"117\" class=\"java.lang.String\" itemvalue=\"numpy-groupies\" />\n            <item index=\"118\" class=\"java.lang.String\" itemvalue=\"padme\" />\n            <item index=\"119\" class=\"java.lang.String\" itemvalue=\"pygobject\" />\n            <item index=\"120\" class=\"java.lang.String\" itemvalue=\"msgpack\" />\n            <item index=\"121\" class=\"java.lang.String\" itemvalue=\"unity-scope-chromiumbookmarks\" />\n            <item index=\"122\" class=\"java.lang.String\" itemvalue=\"PyJWT\" />\n            <item index=\"123\" class=\"java.lang.String\" itemvalue=\"onboard\" />\n            <item index=\"124\" class=\"java.lang.String\" itemvalue=\"pydiffmap\" />\n            <item index=\"125\" class=\"java.lang.String\" itemvalue=\"pandocfilters\" />\n            <item index=\"126\" class=\"java.lang.String\" itemvalue=\"slimit\" />\n            <item index=\"127\" class=\"java.lang.String\" itemvalue=\"unity-scope-virtualbox\" />\n            <item index=\"128\" class=\"java.lang.String\" itemvalue=\"lightdm-gtk-greeter-settings\" />\n            <item index=\"129\" class=\"java.lang.String\" itemvalue=\"pyasn1\" />\n            <item index=\"130\" class=\"java.lang.String\" itemvalue=\"requests\" />\n            <item index=\"131\" class=\"java.lang.String\" itemvalue=\"nilearn\" />\n            <item index=\"132\" class=\"java.lang.String\" itemvalue=\"XlsxWriter\" />\n     
       <item index=\"133\" class=\"java.lang.String\" itemvalue=\"seaborn\" />\n            <item index=\"134\" class=\"java.lang.String\" itemvalue=\"cached-property\" />\n            <item index=\"135\" class=\"java.lang.String\" itemvalue=\"xgboost\" />\n            <item index=\"136\" class=\"java.lang.String\" itemvalue=\"ipywidgets\" />\n            <item index=\"137\" class=\"java.lang.String\" itemvalue=\"blinker\" />\n            <item index=\"138\" class=\"java.lang.String\" itemvalue=\"ubuntu-drivers-common\" />\n            <item index=\"139\" class=\"java.lang.String\" itemvalue=\"scipy\" />\n            <item index=\"140\" class=\"java.lang.String\" itemvalue=\"tornado\" />\n            <item index=\"141\" class=\"java.lang.String\" itemvalue=\"opencv-python\" />\n            <item index=\"142\" class=\"java.lang.String\" itemvalue=\"unity-scope-firefoxbookmarks\" />\n            <item index=\"143\" class=\"java.lang.String\" itemvalue=\"xkit\" />\n            <item index=\"144\" class=\"java.lang.String\" itemvalue=\"torch\" />\n            <item index=\"145\" class=\"java.lang.String\" itemvalue=\"mistune\" />\n            <item index=\"146\" class=\"java.lang.String\" itemvalue=\"pandas\" />\n            <item index=\"147\" class=\"java.lang.String\" itemvalue=\"shap\" />\n            <item index=\"148\" class=\"java.lang.String\" itemvalue=\"termcolor\" />\n            <item index=\"149\" class=\"java.lang.String\" itemvalue=\"torch-spline-conv\" />\n            <item index=\"150\" class=\"java.lang.String\" itemvalue=\"future\" />\n            <item index=\"151\" class=\"java.lang.String\" itemvalue=\"jupyter-console\" />\n            <item index=\"152\" class=\"java.lang.String\" itemvalue=\"unity-scope-texdoc\" />\n            <item index=\"153\" class=\"java.lang.String\" itemvalue=\"usb-creator\" />\n            <item index=\"154\" class=\"java.lang.String\" itemvalue=\"Pillow\" />\n            <item index=\"155\" class=\"java.lang.String\" 
itemvalue=\"html5lib\" />\n            <item index=\"156\" class=\"java.lang.String\" itemvalue=\"Brlapi\" />\n            <item index=\"157\" class=\"java.lang.String\" itemvalue=\"python-dateutil\" />\n            <item index=\"158\" class=\"java.lang.String\" itemvalue=\"MarkupSafe\" />\n            <item index=\"159\" class=\"java.lang.String\" itemvalue=\"feedparser\" />\n            <item index=\"160\" class=\"java.lang.String\" itemvalue=\"tflearn\" />\n            <item index=\"161\" class=\"java.lang.String\" itemvalue=\"msgpack-numpy\" />\n            <item index=\"162\" class=\"java.lang.String\" itemvalue=\"segraph\" />\n            <item index=\"163\" class=\"java.lang.String\" itemvalue=\"unattended-upgrades\" />\n            <item index=\"164\" class=\"java.lang.String\" itemvalue=\"Markdown\" />\n            <item index=\"165\" class=\"java.lang.String\" itemvalue=\"notebook\" />\n            <item index=\"166\" class=\"java.lang.String\" itemvalue=\"rpy2\" />\n            <item index=\"167\" class=\"java.lang.String\" itemvalue=\"boto\" />\n            <item index=\"168\" class=\"java.lang.String\" itemvalue=\"python-gnupg\" />\n            <item index=\"169\" class=\"java.lang.String\" itemvalue=\"tensorpack\" />\n            <item index=\"170\" class=\"java.lang.String\" itemvalue=\"ssh-import-id\" />\n            <item index=\"171\" class=\"java.lang.String\" itemvalue=\"unity-scope-openclipart\" />\n            <item index=\"172\" class=\"java.lang.String\" itemvalue=\"panorama\" />\n            <item index=\"173\" class=\"java.lang.String\" itemvalue=\"progressbar\" />\n            <item index=\"174\" class=\"java.lang.String\" itemvalue=\"virtualenv\" />\n            <item index=\"175\" class=\"java.lang.String\" itemvalue=\"Ubuntu-Make\" />\n            <item index=\"176\" class=\"java.lang.String\" itemvalue=\"Augmentor\" />\n            <item index=\"177\" class=\"java.lang.String\" itemvalue=\"enum34\" />\n            <item index=\"178\" 
class=\"java.lang.String\" itemvalue=\"checkbox-support\" />\n            <item index=\"179\" class=\"java.lang.String\" itemvalue=\"scikit-image\" />\n            <item index=\"180\" class=\"java.lang.String\" itemvalue=\"guacamole\" />\n            <item index=\"181\" class=\"java.lang.String\" itemvalue=\"ptyprocess\" />\n            <item index=\"182\" class=\"java.lang.String\" itemvalue=\"more-itertools\" />\n            <item index=\"183\" class=\"java.lang.String\" itemvalue=\"simplegeneric\" />\n            <item index=\"184\" class=\"java.lang.String\" itemvalue=\"python-debian\" />\n            <item index=\"185\" class=\"java.lang.String\" itemvalue=\"python-resize-image\" />\n            <item index=\"186\" class=\"java.lang.String\" itemvalue=\"louis\" />\n            <item index=\"187\" class=\"java.lang.String\" itemvalue=\"urllib3\" />\n            <item index=\"188\" class=\"java.lang.String\" itemvalue=\"Cython\" />\n            <item index=\"189\" class=\"java.lang.String\" itemvalue=\"unity-scope-gdrive\" />\n            <item index=\"190\" class=\"java.lang.String\" itemvalue=\"pytest\" />\n            <item index=\"191\" class=\"java.lang.String\" itemvalue=\"nbformat\" />\n            <item index=\"192\" class=\"java.lang.String\" itemvalue=\"xdiagnose\" />\n            <item index=\"193\" class=\"java.lang.String\" itemvalue=\"Keras-Applications\" />\n            <item index=\"194\" class=\"java.lang.String\" itemvalue=\"scikit-plot\" />\n            <item index=\"195\" class=\"java.lang.String\" itemvalue=\"tqdm\" />\n            <item index=\"196\" class=\"java.lang.String\" itemvalue=\"grpcio\" />\n            <item index=\"197\" class=\"java.lang.String\" itemvalue=\"deepdish\" />\n            <item index=\"198\" class=\"java.lang.String\" itemvalue=\"unity-scope-calculator\" />\n            <item index=\"199\" class=\"java.lang.String\" itemvalue=\"ply\" />\n          </list>\n        </value>\n      </option>\n    </inspection_tool>\n 
 </profile>\n</component>"
  },
  {
    "path": ".idea/misc.xml",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<project version=\"4\">\n  <component name=\"ProjectRootManager\" version=\"2\" project-jdk-name=\"li-cancer Remote Python 3.8.5 (sftp://xiaoxiaol@localhost:6000/data/xiaoxiaol/anaconda3/envs/cancergnn/bin/python)\" project-jdk-type=\"Python SDK\" />\n  <component name=\"PyCharmProfessionalAdvertiser\">\n    <option name=\"shown\" value=\"true\" />\n  </component>\n</project>"
  },
  {
    "path": ".idea/modules.xml",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<project version=\"4\">\n  <component name=\"ProjectModuleManager\">\n    <modules>\n      <module fileurl=\"file://$PROJECT_DIR$/.idea/GNN_biomarker_MEDIA.iml\" filepath=\"$PROJECT_DIR$/.idea/GNN_biomarker_MEDIA.iml\" />\n    </modules>\n  </component>\n</project>"
  },
  {
    "path": ".idea/webServers.xml",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<project version=\"4\">\n  <component name=\"WebServers\">\n    <option name=\"servers\">\n      <webServer id=\"58b70e43-c401-48fa-983a-32280c016f57\" name=\"ipag\">\n        <fileTransfer accessType=\"SFTP\" host=\"localhost\" port=\"6000\" sshConfigId=\"dba9c212-8899-4954-a857-6abbb7000465\" sshConfig=\"xiaoxiaol@localhost:6000 password\">\n          <advancedOptions>\n            <advancedOptions dataProtectionLevel=\"Private\" passiveMode=\"true\" shareSSLContext=\"true\" />\n          </advancedOptions>\n        </fileTransfer>\n      </webServer>\n    </option>\n  </component>\n</project>"
  },
  {
    "path": "01-fetch_data.py",
    "content": "# Copyright (c) 2019 Mwiza Kunda\n# Copyright (C) 2017 Sarah Parisot <s.parisot@imperial.ac.uk>, Sofia Ira Ktena <ira.ktena@imperial.ac.uk>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program.  If not, see <http://www.gnu.org/licenses/>.\n\n'''\nThis script mainly refers to https://github.com/kundaMwiza/fMRI-site-adaptation/blob/master/fetch_data.py\n'''\n\nfrom nilearn import datasets\nimport argparse\nfrom imports import preprocess_data as Reader\nimport os\nimport shutil\nimport sys\n\n# Input data variables\ncode_folder = os.getcwd()\nroot_folder = '/data/'\ndata_folder = os.path.join(root_folder, 'ABIDE_pcp/cpac/filt_noglobal/')\nif not os.path.exists(data_folder):\n    os.makedirs(data_folder)\nshutil.copyfile(os.path.join(root_folder,'subject_ID.txt'), os.path.join(data_folder, 'subject_IDs.txt'))\n\ndef str2bool(v):\n    if isinstance(v, bool):\n        return v\n    if v.lower() in ('yes', 'true', 't', 'y', '1'):\n        return True\n    elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n        return False\n    else:\n        raise argparse.ArgumentTypeError('Boolean value expected.')\n\n\ndef main():\n    parser = argparse.ArgumentParser(description='Download ABIDE data and compute functional connectivity matrices')\n    parser.add_argument('--pipeline', default='cpac', type=str,\n                        help='Pipeline to preprocess ABIDE data. Available options are ccs, cpac, dparsf and niak.'\n                             ' default: cpac.')\n    parser.add_argument('--atlas', default='cc200',\n                        help='Brain parcellation atlas. Options: ho, cc200 and cc400, default: cc200.')\n    parser.add_argument('--download', default=True, type=str2bool,\n                        help='Download data or just compute functional connectivity. default: True')\n    args = parser.parse_args()\n    print(args)\n\n    params = dict()\n\n    pipeline = args.pipeline\n    atlas = args.atlas\n    download = args.download\n\n    # Files to fetch\n\n    files = ['rois_' + atlas]\n\n    filemapping = {'func_preproc': 'func_preproc.nii.gz',\n                   files[0]: files[0] + '.1D'}\n\n\n    # Download database files\n    if download == True:\n        abide = datasets.fetch_abide_pcp(data_dir=root_folder, pipeline=pipeline,\n                                         band_pass_filtering=True, global_signal_regression=False, derivatives=files,\n                                         quality_checked=False)\n\n    subject_IDs = Reader.get_ids() #changed path to data path\n    subject_IDs = subject_IDs.tolist()\n\n    # Create a folder for each subject\n    for s, fname in zip(subject_IDs, Reader.fetch_filenames(subject_IDs, files[0], atlas)):\n        subject_folder = os.path.join(data_folder, s)\n        if not os.path.exists(subject_folder):\n            os.mkdir(subject_folder)\n\n        # Get the base filename for each subject\n        base = fname.split(files[0])[0]\n\n        # Move each subject file to the subject folder\n        for fl in files:\n            if not os.path.exists(os.path.join(subject_folder, base + filemapping[fl])):\n                shutil.move(base + filemapping[fl], subject_folder)\n\n    time_series = Reader.get_timeseries(subject_IDs, atlas)\n\n    # Compute and save connectivity matrices\n    Reader.subject_connectivity(time_series, subject_IDs, atlas, 'correlation')\n    Reader.subject_connectivity(time_series, subject_IDs, atlas, 'partial correlation')\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "02-process_data.py",
    "content": "# Copyright (c) 2019 Mwiza Kunda\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program.  If not, see <http://www.gnu.org/licenses/>.\n\n\nimport sys\nimport argparse\nimport pandas as pd\nimport numpy as np\nfrom imports import preprocess_data as Reader\nimport deepdish as dd\nimport warnings\nimport os\n\nwarnings.filterwarnings(\"ignore\")\nroot_folder = '/data/'\ndata_folder = os.path.join(root_folder, 'ABIDE_pcp/cpac/filt_noglobal/')\n\n# Process boolean command line arguments\ndef str2bool(v):\n    if isinstance(v, bool):\n        return v\n    if v.lower() in ('yes', 'true', 't', 'y', '1'):\n        return True\n    elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n        return False\n    else:\n        raise argparse.ArgumentTypeError('Boolean value expected.')\n\n\ndef main():\n    parser = argparse.ArgumentParser(description='Classification of the ABIDE dataset using a Ridge classifier. '\n                                                 'MIDA is used to minimize the distribution mismatch between ABIDE sites')\n    parser.add_argument('--atlas', default='cc200',\n                        help='Atlas for network construction (node definition) options: ho, cc200, cc400, default: cc200.')\n    parser.add_argument('--seed', default=123, type=int, help='Seed for random initialisation. default: 123.')\n    parser.add_argument('--nclass', default=2, type=int, help='Number of classes. default:2')\n\n\n    args = parser.parse_args()\n    print('Arguments: \\n', args)\n\n\n    params = dict()\n\n    params['seed'] = args.seed  # seed for random initialisation\n\n    # Algorithm choice\n    params['atlas'] = args.atlas  # Atlas for network construction\n    atlas = args.atlas  # Atlas for network construction (node definition)\n\n    # Get subject IDs and class labels\n    subject_IDs = Reader.get_ids()\n    labels = Reader.get_subject_score(subject_IDs, score='DX_GROUP')\n\n    # Number of subjects and classes for binary classification\n    num_classes = args.nclass\n    num_subjects = len(subject_IDs)\n    params['n_subjects'] = num_subjects\n\n    # Initialise variables for class labels and acquisition sites\n    # 1 is autism, 2 is control\n    y_data = np.zeros([num_subjects, num_classes]) # n x 2\n    y = np.zeros([num_subjects, 1]) # n x 1\n\n    # Get class labels for all subjects\n    for i in range(num_subjects):\n        y_data[i, int(labels[subject_IDs[i]]) - 1] = 1\n        y[i] = int(labels[subject_IDs[i]])\n\n    # Compute feature vectors (vectorised connectivity networks)\n    fea_corr = Reader.get_networks(subject_IDs, iter_no='', kind='correlation', atlas_name=atlas) #(1035, 200, 200)\n    fea_pcorr = Reader.get_networks(subject_IDs, iter_no='', kind='partial correlation', atlas_name=atlas) #(1035, 200, 200)\n\n    if not os.path.exists(os.path.join(data_folder,'raw')):\n        os.makedirs(os.path.join(data_folder,'raw'))\n    for i, subject in enumerate(subject_IDs):\n        dd.io.save(os.path.join(data_folder,'raw',subject+'.h5'),{'corr':fea_corr[i],'pcorr':fea_pcorr[i],'label':y[i]%2})\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "03-main.py",
    "content": "import os\nimport numpy as np\nimport argparse\nimport time\nimport copy\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.optim import lr_scheduler\nfrom tensorboardX import SummaryWriter\n\nfrom imports.ABIDEDataset import ABIDEDataset\nfrom torch_geometric.data import DataLoader\nfrom net.braingnn import Network\nfrom imports.utils import train_val_test_split\nfrom sklearn.metrics import classification_report, confusion_matrix\n\ntorch.manual_seed(123)\n\nEPS = 1e-10\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--epoch', type=int, default=0, help='starting epoch')\nparser.add_argument('--n_epochs', type=int, default=100, help='number of epochs of training')\nparser.add_argument('--batchSize', type=int, default=100, help='size of the batches')\nparser.add_argument('--dataroot', type=str, default='/home/azureuser/projects/BrainGNN/data/ABIDE_pcp/cpac/filt_noglobal', help='root directory of the dataset')\nparser.add_argument('--fold', type=int, default=0, help='training which fold')\nparser.add_argument('--lr', type = float, default=0.01, help='learning rate')\nparser.add_argument('--stepsize', type=int, default=20, help='scheduler step size')\nparser.add_argument('--gamma', type=float, default=0.5, help='scheduler shrinking rate')\nparser.add_argument('--weightdecay', type=float, default=5e-3, help='regularization')\nparser.add_argument('--lamb0', type=float, default=1, help='classification loss weight')\nparser.add_argument('--lamb1', type=float, default=0, help='s1 unit regularization')\nparser.add_argument('--lamb2', type=float, default=0, help='s2 unit regularization')\nparser.add_argument('--lamb3', type=float, default=0.1, help='s1 entropy regularization')\nparser.add_argument('--lamb4', type=float, default=0.1, help='s2 entropy regularization')\nparser.add_argument('--lamb5', type=float, default=0.1, help='s1 consistence 
regularization')\nparser.add_argument('--layer', type=int, default=2, help='number of GNN layers')\nparser.add_argument('--ratio', type=float, default=0.5, help='pooling ratio')\nparser.add_argument('--indim', type=int, default=200, help='feature dim')\nparser.add_argument('--nroi', type=int, default=200, help='num of ROIs')\nparser.add_argument('--nclass', type=int, default=2, help='num of classes')\nparser.add_argument('--load_model', type=bool, default=False)\nparser.add_argument('--save_model', type=bool, default=True)\nparser.add_argument('--optim', type=str, default='Adam', help='optimization method: SGD, Adam')\nparser.add_argument('--save_path', type=str, default='./model/', help='path to save model')\nopt = parser.parse_args()\n\nif not os.path.exists(opt.save_path):\n    os.makedirs(opt.save_path)\n\n#################### Parameter Initialization #######################\npath = opt.dataroot\nname = 'ABIDE'\nsave_model = opt.save_model\nload_model = opt.load_model\nopt_method = opt.optim\nnum_epoch = opt.n_epochs\nfold = opt.fold\nwriter = SummaryWriter(os.path.join('./log',str(fold)))\n\n\n\n################## Define Dataloader ##################################\n\ndataset = ABIDEDataset(path,name)\ndataset.data.y = dataset.data.y.squeeze()\ndataset.data.x[dataset.data.x == float('inf')] = 0\n\ntr_index,val_index,te_index = train_val_test_split(fold=fold)\ntrain_dataset = dataset[tr_index]\nval_dataset = dataset[val_index]\ntest_dataset = dataset[te_index]\n\n\ntrain_loader = DataLoader(train_dataset,batch_size=opt.batchSize, shuffle= True)\nval_loader = DataLoader(val_dataset, batch_size=opt.batchSize, shuffle=False)\ntest_loader = DataLoader(test_dataset, batch_size=opt.batchSize, shuffle=False)\n\n\n\n############### Define Graph Deep Learning Network ##########################\nmodel = Network(opt.indim,opt.ratio,opt.nclass).to(device)\nprint(model)\n\nif opt_method == 'Adam':\n    optimizer = torch.optim.Adam(model.parameters(), lr= opt.lr, 
weight_decay=opt.weightdecay)\nelif opt_method == 'SGD':\n    optimizer = torch.optim.SGD(model.parameters(), lr =opt.lr, momentum = 0.9, weight_decay=opt.weightdecay, nesterov = True)\n\nscheduler = lr_scheduler.StepLR(optimizer, step_size=opt.stepsize, gamma=opt.gamma)\n\n############################### Define Other Loss Functions ########################################\ndef topk_loss(s,ratio):\n    if ratio > 0.5:\n        ratio = 1-ratio\n    s = s.sort(dim=1).values\n    res =  -torch.log(s[:,-int(s.size(1)*ratio):]+EPS).mean() -torch.log(1-s[:,:int(s.size(1)*ratio)]+EPS).mean()\n    return res\n\n\ndef consist_loss(s):\n    if len(s) == 0:\n        return 0\n    s = torch.sigmoid(s)\n    W = torch.ones(s.shape[0],s.shape[0])\n    D = torch.eye(s.shape[0])*torch.sum(W,dim=1)\n    L = D-W\n    L = L.to(device)\n    res = torch.trace(torch.transpose(s,0,1) @ L @ s)/(s.shape[0]*s.shape[0])\n    return res\n\n###################### Network Training Function#####################################\ndef train(epoch):\n    print('train...........')\n    scheduler.step()\n\n    for param_group in optimizer.param_groups:\n        print(\"LR\", param_group['lr'])\n    model.train()\n    s1_list = []\n    s2_list = []\n    loss_all = 0\n    step = 0\n    for data in train_loader:\n        data = data.to(device)\n        optimizer.zero_grad()\n        output, w1, w2, s1, s2 = model(data.x, data.edge_index, data.batch, data.edge_attr, data.pos)\n        s1_list.append(s1.view(-1).detach().cpu().numpy())\n        s2_list.append(s2.view(-1).detach().cpu().numpy())\n\n        loss_c = F.nll_loss(output, data.y)\n\n        loss_p1 = (torch.norm(w1, p=2)-1) ** 2\n        loss_p2 = (torch.norm(w2, p=2)-1) ** 2\n        loss_tpk1 = topk_loss(s1,opt.ratio)\n        loss_tpk2 = topk_loss(s2,opt.ratio)\n        loss_consist = 0\n        for c in range(opt.nclass):\n            loss_consist += consist_loss(s1[data.y == c])\n        loss = opt.lamb0*loss_c + opt.lamb1 * loss_p1 + 
opt.lamb2 * loss_p2 \\\n                   + opt.lamb3 * loss_tpk1 + opt.lamb4 *loss_tpk2 + opt.lamb5* loss_consist\n        writer.add_scalar('train/classification_loss', loss_c, epoch*len(train_loader)+step)\n        writer.add_scalar('train/unit_loss1', loss_p1, epoch*len(train_loader)+step)\n        writer.add_scalar('train/unit_loss2', loss_p2, epoch*len(train_loader)+step)\n        writer.add_scalar('train/TopK_loss1', loss_tpk1, epoch*len(train_loader)+step)\n        writer.add_scalar('train/TopK_loss2', loss_tpk2, epoch*len(train_loader)+step)\n        writer.add_scalar('train/GCL_loss', loss_consist, epoch*len(train_loader)+step)\n        step = step + 1\n\n        loss.backward()\n        loss_all += loss.item() * data.num_graphs\n        optimizer.step()\n\n        s1_arr = np.hstack(s1_list)\n        s2_arr = np.hstack(s2_list)\n    return loss_all / len(train_dataset), s1_arr, s2_arr ,w1,w2\n\n\n###################### Network Testing Function#####################################\ndef test_acc(loader):\n    model.eval()\n    correct = 0\n    for data in loader:\n        data = data.to(device)\n        outputs= model(data.x, data.edge_index, data.batch, data.edge_attr,data.pos)\n        pred = outputs[0].max(dim=1)[1]\n        correct += pred.eq(data.y).sum().item()\n\n    return correct / len(loader.dataset)\n\ndef test_loss(loader,epoch):\n    print('testing...........')\n    model.eval()\n    loss_all = 0\n    for data in loader:\n        data = data.to(device)\n        output, w1, w2, s1, s2= model(data.x, data.edge_index, data.batch, data.edge_attr,data.pos)\n        loss_c = F.nll_loss(output, data.y)\n\n        loss_p1 = (torch.norm(w1, p=2)-1) ** 2\n        loss_p2 = (torch.norm(w2, p=2)-1) ** 2\n        loss_tpk1 = topk_loss(s1,opt.ratio)\n        loss_tpk2 = topk_loss(s2,opt.ratio)\n        loss_consist = 0\n        for c in range(opt.nclass):\n            loss_consist += consist_loss(s1[data.y == c])\n        loss = opt.lamb0*loss_c + 
opt.lamb1 * loss_p1 + opt.lamb2 * loss_p2 \\\n                   + opt.lamb3 * loss_tpk1 + opt.lamb4 *loss_tpk2 + opt.lamb5* loss_consist\n\n        loss_all += loss.item() * data.num_graphs\n    return loss_all / len(loader.dataset)\n\n#######################################################################################\n############################   Model Training #########################################\n#######################################################################################\nbest_model_wts = copy.deepcopy(model.state_dict())\nbest_loss = 1e10\nfor epoch in range(0, num_epoch):\n    since  = time.time()\n    tr_loss, s1_arr, s2_arr, w1, w2 = train(epoch)\n    tr_acc = test_acc(train_loader)\n    val_acc = test_acc(val_loader)\n    val_loss = test_loss(val_loader,epoch)\n    time_elapsed = time.time() - since\n    print('*====**')\n    print('{:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))\n    print('Epoch: {:03d}, Train Loss: {:.7f}, '\n          'Train Acc: {:.7f}, Test Loss: {:.7f}, Test Acc: {:.7f}'.format(epoch, tr_loss,\n                                                       tr_acc, val_loss, val_acc))\n\n    writer.add_scalars('Acc',{'train_acc':tr_acc,'val_acc':val_acc},  epoch)\n    writer.add_scalars('Loss', {'train_loss': tr_loss, 'val_loss': val_loss},  epoch)\n    writer.add_histogram('Hist/hist_s1', s1_arr, epoch)\n    writer.add_histogram('Hist/hist_s2', s2_arr, epoch)\n\n    if val_loss < best_loss and epoch > 5:\n        print(\"saving best model\")\n        best_loss = val_loss\n        best_model_wts = copy.deepcopy(model.state_dict())\n        if save_model:\n            torch.save(best_model_wts, os.path.join(opt.save_path,str(fold)+'.pth'))\n\n#######################################################################################\n######################### Testing on testing set 
######################################\n#######################################################################################\n\nif opt.load_model:\n    model = Network(opt.indim,opt.ratio,opt.nclass).to(device)\n    model.load_state_dict(torch.load(os.path.join(opt.save_path,str(fold)+'.pth')))\n    model.eval()\n    preds = []\n    correct = 0\n    for data in val_loader:\n        data = data.to(device)\n        outputs= model(data.x, data.edge_index, data.batch, data.edge_attr,data.pos)\n        pred = outputs[0].max(1)[1]\n        preds.append(pred.cpu().detach().numpy())\n        correct += pred.eq(data.y).sum().item()\n    preds = np.concatenate(preds,axis=0)\n    trues = val_dataset.data.y.cpu().detach().numpy()\n    cm = confusion_matrix(trues,preds)\n    print(\"Confusion matrix\")\n    print(classification_report(trues, preds))\n\nelse:\n   model.load_state_dict(best_model_wts)\n   model.eval()\n   test_accuracy = test_acc(test_loader)\n   test_l= test_loss(test_loader,0)\n   print(\"===========================\")\n   print(\"Test Acc: {:.7f}, Test Loss: {:.7f} \".format(test_accuracy, test_l))\n   print(opt)\n\n"
  },
  {
    "path": "README.md",
    "content": "# Graph Neural Network for Brain Network Analysis\n A preliminary implementation of BrainGNN. The example presented here is on the public resting-state fMRI ABIDE for the convenience of development. This dataset was different from the ones used in our publication, which are cleaner task-fMRI. Still seeking solutions improve representation learning on the noisy data.\n\n\n## Usage\n### Setup\n**pip**\n\nSee the `requirements.txt` for environment configuration. \n```bash\npip install -r requirements.txt\n```\n**PYG**\n\nTo install pyg library, [please refer to the document](https://pytorch-geometric.readthedocs.io/en/latest/notes/installation.html)\n\n### Dataset \n**ABIDE**\n\nWe treat each fMRI as a brain graph. How to download and construct the graphs?\n```\npython 01-fetch_data.py\npython 02-process_data.py\n```\n\n### How to run classification?\nTraining and testing are integrated in file `main.py`. To run\n```\npython 03-main.py \n```\n\n\n## Citation\nIf you find the code and dataset useful, please cite our paper.\n```latex\n@article{li2020braingnn,\n  title={Braingnn: Interpretable brain graph neural network for fmri analysis},\n  author={Li, Xiaoxiao and Zhou,Yuan and Dvornek, Nicha and Zhang, Muhan and Gao, Siyuan and Zhuang, Juntang and Scheinost, Dustin and Staib, Lawrence and Ventola, Pamela and Duncan, James},\n  journal={bioRxiv},\n  year={2020},\n  publisher={Cold Spring Harbor Laboratory}\n}\n```\n"
  },
  {
    "path": "data/subject_ID.txt",
    "content": "50128\n51203\n50325\n50117\n50573\n50741\n50779\n51009\n50746\n50574\n50110\n50322\n51036\n51204\n50119\n50126\n50314\n51490\n50784\n51464\n51000\n51038\n50748\n51235\n51007\n51463\n50783\n50777\n50313\n50121\n51053\n51261\n50723\n50511\n51295\n50347\n50982\n50976\n51098\n51292\n50340\n50516\n50724\n51266\n51054\n50186\n50529\n50985\n50520\n50376\n50978\n50144\n51096\n50382\n51250\n51062\n50349\n51065\n50385\n51257\n50143\n51091\n50371\n50527\n51268\n50188\n50518\n50749\n51039\n50776\n50120\n50312\n51006\n51234\n50782\n51462\n50118\n51465\n50785\n51001\n50315\n50127\n51491\n51008\n50778\n51205\n50575\n50747\n50111\n50129\n50116\n50324\n50740\n50572\n51030\n51202\n50370\n50142\n51090\n50526\n51256\n51064\n50519\n50189\n51269\n51063\n50383\n51251\n50521\n50145\n51097\n50979\n50377\n50348\n51055\n50187\n51267\n51293\n50341\n50725\n51258\n50984\n50528\n50970\n50510\n50722\n51294\n50346\n51260\n51052\n51099\n50977\n50379\n50983\n50039\n50496\n51312\n50234\n50006\n50650\n50802\n50668\n51118\n50657\n50233\n51127\n51315\n50491\n50008\n50498\n50037\n50205\n50661\n51581\n50453\n50695\n51575\n51111\n51323\n51129\n50659\n51324\n51116\n51572\n50692\n50666\n50202\n50030\n51142\n51370\n50269\n51189\n50251\n50407\n50438\n51348\n50603\n50267\n51187\n50055\n51341\n50293\n51173\n51174\n51346\n50294\n51180\n50052\n50260\n50604\n50436\n50658\n51128\n50667\n50455\n50031\n50203\n51117\n51325\n50693\n51573\n50499\n50009\n51574\n50694\n51322\n51110\n50204\n50036\n51580\n50660\n50803\n50669\n51314\n51126\n50490\n50656\n50232\n50038\n50804\n50007\n50235\n50651\n50463\n50497\n51121\n51313\n50261\n51181\n50053\n50437\n50605\n51347\n50295\n51175\n50408\n51172\n51340\n50292\n50602\n51186\n50054\n50266\n50259\n50250\n50406\n51349\n50439\n50257\n51188\n50268\n51195\n50047\n50275\n50611\n51161\n51353\n50281\n51159\n51354\n50286\n51166\n50424\n50616\n50272\n51192\n50040\n50049\n51362\n51150\n50412\n50620\n50618\n50288\n51168\n50627\n50415\n50243\n51365\n50441\n50217\n50025\n50819\n513
31\n51103\n51567\n50687\n50826\n51558\n51560\n51104\n51336\n50022\n50210\n50446\n51309\n50821\n51132\n51300\n51556\n50642\n50470\n50014\n51569\n50689\n50817\n50013\n50477\n50645\n50483\n51307\n51135\n50448\n51338\n51169\n50289\n50619\n51364\n51156\n50414\n50626\n50242\n50048\n50245\n50621\n50413\n51151\n51363\n50628\n50617\n50425\n51193\n50041\n50273\n51167\n51355\n50287\n51352\n50280\n51160\n50274\n51194\n50046\n50422\n50610\n50482\n51134\n51306\n50012\n50644\n51339\n50449\n50643\n50015\n51301\n51133\n50485\n51557\n50816\n50688\n51568\n50211\n50023\n50447\n51561\n51105\n50820\n51308\n51102\n51330\n50686\n51566\n50440\n50818\n50024\n50216\n51559\n50169\n50955\n50156\n51084\n50364\n50700\n50532\n51070\n50390\n51048\n50952\n50738\n50397\n51077\n50999\n50707\n50363\n51083\n50990\n50158\n50964\n51273\n51041\n50193\n50355\n50167\n50503\n50731\n50709\n50399\n51079\n50997\n50736\n50504\n50160\n51280\n50352\n51046\n50194\n51274\n51482\n50306\n50134\n51220\n51012\n51476\n50796\n50339\n50791\n51471\n51015\n51227\n50133\n50301\n50557\n51485\n51218\n50568\n51023\n51211\n50753\n50561\n50105\n50337\n51478\n50798\n50308\n50330\n50102\n50566\n50754\n51216\n51024\n50559\n51229\n50996\n51078\n50962\n50708\n51275\n51047\n50195\n50505\n50737\n51281\n50353\n50161\n50965\n50159\n50991\n50166\n50354\n50730\n50502\n51040\n50192\n51272\n50739\n51049\n50706\n50150\n51082\n50362\n50998\n51076\n50954\n50168\n50391\n51071\n50365\n50157\n51085\n50701\n51025\n51217\n50103\n50331\n50755\n50567\n51228\n50558\n50560\n50752\n50336\n50104\n51210\n50799\n51479\n50300\n50132\n50556\n51484\n51470\n50790\n51226\n51014\n50569\n51219\n51013\n51221\n50797\n51477\n50551\n51483\n50135\n50307\n50338\n50171\n50343\n51291\n50727\n50515\n50185\n51057\n51265\n50972\n50388\n50986\n51068\n51262\n50182\n51050\n51606\n50344\n51296\n50981\n50149\n51254\n50386\n50988\n51066\n50372\n50524\n51059\n50711\n50523\n51095\n50147\n50375\n51061\n51253\n50381\n51298\n51238\n50577\n50745\n50321\n50113\n51207\n51035\n51469\n50789\n5
0319\n51456\n51032\n50114\n50326\n50742\n50570\n51209\n51236\n50780\n51460\n50774\n50122\n50310\n51458\n50317\n50125\n51493\n50773\n51467\n50787\n51231\n51003\n51252\n50380\n51060\n50710\n50374\n51094\n50146\n51299\n51093\n50373\n50525\n51067\n50989\n51255\n50387\n50728\n51058\n50345\n51297\n50183\n51051\n51263\n51607\n50148\n50974\n51264\n50184\n51056\n50342\n50170\n50514\n50726\n51069\n50987\n50973\n51459\n50329\n50786\n51466\n51002\n51230\n50124\n50316\n50772\n51492\n50578\n51208\n50775\n50311\n50123\n51237\n51461\n50781\n50318\n50788\n51468\n50327\n50115\n50571\n50743\n51457\n51201\n51033\n51239\n51034\n51206\n50744\n50576\n50112\n50320\n50060\n50252\n50404\n51146\n50609\n50299\n51179\n51373\n51141\n50403\n50255\n50058\n50297\n51345\n51177\n50263\n50051\n51183\n50435\n50607\n51148\n50056\n51184\n50264\n51170\n50290\n51342\n50801\n51329\n50466\n50654\n51316\n51124\n50492\n51578\n50698\n50208\n51123\n51311\n50005\n50237\n50653\n51318\n50468\n51327\n50691\n51571\n50665\n51585\n50033\n50201\n50239\n50206\n50034\n51582\n51576\n50696\n51320\n51112\n50291\n51343\n51171\n50433\n50601\n50265\n50057\n51185\n50050\n51182\n50262\n50606\n50434\n50296\n51344\n51149\n50402\n50254\n51140\n50059\n51147\n50253\n50405\n51178\n50298\n50608\n50697\n51577\n51113\n51321\n50035\n50207\n50663\n51583\n50469\n51319\n51584\n50664\n50200\n50032\n51326\n51114\n51570\n50690\n50807\n50209\n50699\n51579\n50236\n50004\n50652\n50494\n51122\n51328\n50800\n51317\n50493\n50655\n50467\n50003\n51563\n50683\n51335\n51107\n50213\n50445\n51138\n50648\n50822\n50442\n50026\n50214\n51100\n51332\n51564\n50019\n50825\n50489\n50010\n50646\n50480\n51136\n51304\n51109\n51303\n51131\n50487\n50017\n50028\n50814\n50418\n51165\n50285\n51357\n50615\n50427\n50043\n51191\n50271\n50249\n50276\n50044\n51196\n50612\n50282\n51350\n51162\n51359\n50416\n50624\n50240\n51154\n50278\n51198\n51153\n51361\n50247\n50623\n50411\n50016\n51130\n51302\n50486\n50815\n50029\n50481\n51305\n51137\n50011\n50647\n50812\n51333\n51101\n51565\
n50685\n50443\n50215\n50027\n50488\n50824\n50020\n50212\n50444\n50682\n51562\n51106\n51334\n50649\n50823\n51139\n51199\n50279\n50246\n50410\n50622\n51360\n51152\n51358\n50428\n51155\n50625\n50417\n50241\n50248\n51163\n50283\n51351\n50045\n51197\n50277\n50613\n50421\n50419\n51369\n50426\n50614\n50270\n50042\n51190\n50284\n51356\n51164\n51472\n50792\n51224\n51016\n50302\n50130\n51486\n50554\n51029\n51481\n50553\n50305\n51011\n51223\n50795\n50333\n50757\n50565\n51027\n51215\n51488\n51018\n51212\n51020\n50562\n50750\n50334\n50106\n51279\n50199\n50509\n51074\n50704\n51080\n50152\n50360\n50956\n50358\n50367\n51087\n50969\n50531\n50703\n51241\n51073\n50960\n50994\n51248\n50507\n50735\n50351\n50163\n51277\n50197\n51045\n50993\n50369\n51089\n50967\n50190\n51042\n50164\n50958\n50356\n50732\n50500\n50751\n50563\n50107\n50335\n51021\n51213\n51214\n51026\n50332\n50564\n50756\n51019\n51489\n51222\n51010\n51474\n50794\n51480\n50552\n50304\n50136\n50109\n50131\n50303\n51487\n50555\n50793\n51473\n51017\n51225\n51028\n50966\n51088\n50368\n50992\n50357\n50959\n50501\n50733\n51271\n50191\n51249\n50995\n50961\n50196\n51044\n51276\n50162\n50350\n51282\n50359\n50957\n51072\n51240\n50968\n51086\n50366\n50702\n50530\n50198\n51278\n50705\n50361\n51081\n50153\n51075\n"
  },
  {
    "path": "imports/ABIDEDataset.py",
    "content": "import torch\nfrom torch_geometric.data import InMemoryDataset,Data\nfrom os.path import join, isfile\nfrom os import listdir\nimport numpy as np\nimport os.path as osp\nfrom imports.read_abide_stats_parall import read_data\n\n\nclass ABIDEDataset(InMemoryDataset):\n    def __init__(self, root, name, transform=None, pre_transform=None):\n        self.root = root\n        self.name = name\n        super(ABIDEDataset, self).__init__(root,transform, pre_transform)\n        self.data, self.slices = torch.load(self.processed_paths[0])\n\n    @property\n    def raw_file_names(self):\n        data_dir = osp.join(self.root,'raw')\n        onlyfiles = [f for f in listdir(data_dir) if osp.isfile(osp.join(data_dir, f))]\n        onlyfiles.sort()\n        return onlyfiles\n    @property\n    def processed_file_names(self):\n        return  'data.pt'\n\n    def download(self):\n        # Download to `self.raw_dir`.\n        return\n\n    def process(self):\n        # Read data into huge `Data` list.\n        self.data, self.slices = read_data(self.raw_dir)\n\n        if self.pre_filter is not None:\n            data_list = [self.get(idx) for idx in range(len(self))]\n            data_list = [data for data in data_list if self.pre_filter(data)]\n            self.data, self.slices = self.collate(data_list)\n\n        if self.pre_transform is not None:\n            data_list = [self.get(idx) for idx in range(len(self))]\n            data_list = [self.pre_transform(data) for data in data_list]\n            self.data, self.slices = self.collate(data_list)\n\n        torch.save((self.data, self.slices), self.processed_paths[0])\n\n    def __repr__(self):\n        return '{}({})'.format(self.name, len(self))\n"
  },
  {
    "path": "imports/__inits__.py",
    "content": ""
  },
  {
    "path": "imports/gdc.py",
    "content": "import torch\nimport numba\nimport numpy as np\nfrom scipy.linalg import expm\nfrom torch_geometric.utils import add_self_loops, is_undirected, to_dense_adj\nfrom torch_sparse import coalesce\nfrom torch_scatter import scatter_add\n\n\ndef jit():\n    def decorator(func):\n        try:\n            return numba.jit(cache=True)(func)\n        except RuntimeError:\n            return numba.jit(cache=False)(func)\n\n    return decorator\n\n\nclass GDC(object):\n    r\"\"\"Processes the graph via Graph Diffusion Convolution (GDC) from the\n    `\"Diffusion Improves Graph Learning\" <https://www.kdd.in.tum.de/gdc>`_\n    paper.\n    .. note::\n        The paper offers additional advice on how to choose the\n        hyperparameters.\n        For an example of using GCN with GDC, see `examples/gcn.py\n        <https://github.com/rusty1s/pytorch_geometric/blob/master/examples/\n        gcn.py>`_.\n    Args:\n        self_loop_weight (float, optional): Weight of the added self-loop.\n            Set to :obj:`None` to add no self-loops. (default: :obj:`1`)\n        normalization_in (str, optional): Normalization of the transition\n            matrix on the original (input) graph. Possible values:\n            :obj:`\"sym\"`, :obj:`\"col\"`, and :obj:`\"row\"`.\n            See :func:`GDC.transition_matrix` for details.\n            (default: :obj:`\"sym\"`)\n        normalization_out (str, optional): Normalization of the transition\n            matrix on the transformed GDC (output) graph. 
Possible values:\n            :obj:`\"sym\"`, :obj:`\"col\"`, :obj:`\"row\"`, and :obj:`None`.\n            See :func:`GDC.transition_matrix` for details.\n            (default: :obj:`\"col\"`)\n        diffusion_kwargs (dict, optional): Dictionary containing the parameters\n            for diffusion.\n            `method` specifies the diffusion method (:obj:`\"ppr\"`,\n            :obj:`\"heat\"` or :obj:`\"coeff\"`).\n            Each diffusion method requires different additional parameters.\n            See :func:`GDC.diffusion_matrix_exact` or\n            :func:`GDC.diffusion_matrix_approx` for details.\n            (default: :obj:`dict(method='ppr', alpha=0.15)`)\n        sparsification_kwargs (dict, optional): Dictionary containing the\n            parameters for sparsification.\n            `method` specifies the sparsification method (:obj:`\"threshold\"` or\n            :obj:`\"topk\"`).\n            Each sparsification method requires different additional\n            parameters.\n            See :func:`GDC.sparsify_dense` for details.\n            (default: :obj:`dict(method='threshold', avg_degree=64)`)\n        exact (bool, optional): Whether to exactly calculate the diffusion\n            matrix.\n            Note that the exact variants are not scalable.\n            They densify the adjacency matrix and calculate either its inverse\n            or its matrix exponential.\n            However, the approximate variants do not support edge weights and\n            currently only personalized PageRank and sparsification by\n            threshold are implemented as fast, approximate versions.\n            (default: :obj:`True`)\n    :rtype: :class:`torch_geometric.data.Data`\n    \"\"\"\n    def __init__(self, self_loop_weight=1, normalization_in='sym',\n                 normalization_out='col',\n                 diffusion_kwargs=dict(method='ppr', alpha=0.15),\n                 sparsification_kwargs=dict(method='threshold',\n                          
                  avg_degree=64), exact=True):\n        self.self_loop_weight = self_loop_weight\n        self.normalization_in = normalization_in\n        self.normalization_out = normalization_out\n        self.diffusion_kwargs = diffusion_kwargs\n        self.sparsification_kwargs = sparsification_kwargs\n        self.exact = exact\n\n        if self_loop_weight:\n            assert exact or self_loop_weight == 1\n\n    @torch.no_grad()\n    def __call__(self, data):\n        N = data.num_nodes\n        edge_index = data.edge_index\n        if data.edge_attr is None:\n            edge_weight = torch.ones(edge_index.size(1),\n                                     device=edge_index.device)\n        else:\n            edge_weight = data.edge_attr\n            assert self.exact\n            assert edge_weight.dim() == 1\n\n        if self.self_loop_weight:\n            edge_index, edge_weight = add_self_loops(\n                edge_index, edge_weight, fill_value=self.self_loop_weight,\n                num_nodes=N)\n\n        edge_index, edge_weight = coalesce(edge_index, edge_weight, N, N)\n\n        if self.exact:\n            edge_index, edge_weight = self.transition_matrix(\n                edge_index, edge_weight, N, self.normalization_in)\n            diff_mat = self.diffusion_matrix_exact(edge_index, edge_weight, N,\n                                                   **self.diffusion_kwargs)\n            edge_index, edge_weight = self.sparsify_dense(\n                diff_mat, **self.sparsification_kwargs)\n        else:\n            edge_index, edge_weight = self.diffusion_matrix_approx(\n                edge_index, edge_weight, N, self.normalization_in,\n                **self.diffusion_kwargs)\n            edge_index, edge_weight = self.sparsify_sparse(\n                edge_index, edge_weight, N, **self.sparsification_kwargs)\n\n        edge_index, edge_weight = coalesce(edge_index, edge_weight, N, N)\n        edge_index, edge_weight = 
self.transition_matrix(\n            edge_index, edge_weight, N, self.normalization_out)\n\n        data.edge_index = edge_index\n        data.edge_attr = edge_weight\n\n        return data\n\n    def transition_matrix(self, edge_index, edge_weight, num_nodes,\n                          normalization):\n        r\"\"\"Calculate the approximate, sparse diffusion on a given sparse\n        matrix.\n        Args:\n            edge_index (LongTensor): The edge indices.\n            edge_weight (Tensor): One-dimensional edge weights.\n            num_nodes (int): Number of nodes.\n            normalization (str): Normalization scheme:\n                1. :obj:`\"sym\"`: Symmetric normalization\n                   :math:`\\mathbf{T} = \\mathbf{D}^{-1/2} \\mathbf{A}\n                   \\mathbf{D}^{-1/2}`.\n                2. :obj:`\"col\"`: Column-wise normalization\n                   :math:`\\mathbf{T} = \\mathbf{A} \\mathbf{D}^{-1}`.\n                3. :obj:`\"row\"`: Row-wise normalization\n                   :math:`\\mathbf{T} = \\mathbf{D}^{-1} \\mathbf{A}`.\n                4. :obj:`None`: No normalization.\n        :rtype: (:class:`LongTensor`, :class:`Tensor`)\n        \"\"\"\n        if normalization == 'sym':\n            row, col = edge_index\n            deg = scatter_add(edge_weight, col, dim=0, dim_size=num_nodes)\n            deg_inv_sqrt = deg.pow(-0.5)\n            deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0\n            edge_weight = deg_inv_sqrt[row] * edge_weight * deg_inv_sqrt[col]\n        elif normalization == 'col':\n            _, col = edge_index\n            deg = scatter_add(edge_weight, col, dim=0, dim_size=num_nodes)\n            deg_inv = 1. / deg\n            deg_inv[deg_inv == float('inf')] = 0\n            edge_weight = edge_weight * deg_inv[col]\n        elif normalization == 'row':\n            row, _ = edge_index\n            deg = scatter_add(edge_weight, row, dim=0, dim_size=num_nodes)\n            deg_inv = 1. 
/ deg\n            deg_inv[deg_inv == float('inf')] = 0\n            edge_weight = edge_weight * deg_inv[row]\n        elif normalization is None:\n            pass\n        else:\n            raise ValueError(\n                'Transition matrix normalization {} unknown.'.format(\n                    normalization))\n\n        return edge_index, edge_weight\n\n    def diffusion_matrix_exact(self, edge_index, edge_weight, num_nodes,\n                               method, **kwargs):\n        r\"\"\"Calculate the (dense) diffusion on a given sparse graph.\n        Note that these exact variants are not scalable. They densify the\n        adjacency matrix and calculate either its inverse or its matrix\n        exponential.\n        Args:\n            edge_index (LongTensor): The edge indices.\n            edge_weight (Tensor): One-dimensional edge weights.\n            num_nodes (int): Number of nodes.\n            method (str): Diffusion method:\n                1. :obj:`\"ppr\"`: Use personalized PageRank as diffusion.\n                   Additionally expects the parameter:\n                   - **alpha** (*float*) - Return probability in PPR.\n                     Commonly lies in :obj:`[0.05, 0.2]`.\n                2. :obj:`\"heat\"`: Use heat kernel diffusion.\n                   Additionally expects the parameter:\n                   - **t** (*float*) - Time of diffusion. Commonly lies in\n                     :obj:`[2, 10]`.\n                3. 
:obj:`\"coeff\"`: Freely choose diffusion coefficients.\n                   Additionally expects the parameter:\n                   - **coeffs** (*List[float]*) - List of coefficients\n                     :obj:`theta_k` for each power of the transition matrix\n                     (starting at :obj:`0`).\n        :rtype: (:class:`Tensor`)\n        \"\"\"\n        if method == 'ppr':\n            # α (I_n + (α - 1) A)^-1\n            edge_weight = (kwargs['alpha'] - 1) * edge_weight\n            edge_index, edge_weight = add_self_loops(edge_index, edge_weight,\n                                                     fill_value=1,\n                                                     num_nodes=num_nodes)\n            mat = to_dense_adj(edge_index, edge_attr=edge_weight).squeeze()\n            diff_matrix = kwargs['alpha'] * torch.inverse(mat)\n\n        elif method == 'heat':\n            # exp(t (A - I_n))\n            edge_index, edge_weight = add_self_loops(edge_index, edge_weight,\n                                                     fill_value=-1,\n                                                     num_nodes=num_nodes)\n            edge_weight = kwargs['t'] * edge_weight\n            mat = to_dense_adj(edge_index, edge_attr=edge_weight).squeeze()\n            undirected = is_undirected(edge_index, edge_weight, num_nodes)\n            diff_matrix = self.__expm__(mat, undirected)\n\n        elif method == 'coeff':\n            adj_matrix = to_dense_adj(edge_index,\n                                      edge_attr=edge_weight).squeeze()\n            mat = torch.eye(num_nodes, device=edge_index.device)\n\n            diff_matrix = kwargs['coeffs'][0] * mat\n            for coeff in kwargs['coeffs'][1:]:\n                mat = mat @ adj_matrix\n                diff_matrix += coeff * mat\n        else:\n            raise ValueError('Exact GDC diffusion {} unknown.'.format(method))\n\n        return diff_matrix\n\n    def diffusion_matrix_approx(self, edge_index, 
edge_weight, num_nodes,\n                                normalization, method, **kwargs):\n        r\"\"\"Calculate the approximate, sparse diffusion on a given sparse\n        graph.\n        Args:\n            edge_index (LongTensor): The edge indices.\n            edge_weight (Tensor): One-dimensional edge weights.\n            num_nodes (int): Number of nodes.\n            normalization (str): Transition matrix normalization scheme\n                (:obj:`\"sym\"`, :obj:`\"row\"`, or :obj:`\"col\"`).\n                See :func:`GDC.transition_matrix` for details.\n            method (str): Diffusion method:\n                1. :obj:`\"ppr\"`: Use personalized PageRank as diffusion.\n                   Additionally expects the parameters:\n                   - **alpha** (*float*) - Return probability in PPR.\n                     Commonly lies in :obj:`[0.05, 0.2]`.\n                   - **eps** (*float*) - Threshold for PPR calculation stopping\n                     criterion (:obj:`edge_weight >= eps * out_degree`).\n                     Recommended default: :obj:`1e-4`.\n        :rtype: (:class:`LongTensor`, :class:`Tensor`)\n        \"\"\"\n        if method == 'ppr':\n            if normalization == 'sym':\n                # Calculate original degrees.\n                _, col = edge_index\n                deg = scatter_add(edge_weight, col, dim=0, dim_size=num_nodes)\n\n            edge_index_np = edge_index.cpu().numpy()\n            # Assumes coalesced edge_index.\n            _, indptr, out_degree = np.unique(edge_index_np[0],\n                                              return_index=True,\n                                              return_counts=True)\n\n            neighbors, neighbor_weights = GDC.__calc_ppr__(\n                indptr, edge_index_np[1], out_degree, kwargs['alpha'],\n                kwargs['eps'])\n            ppr_normalization = 'col' if normalization == 'col' else 'row'\n            edge_index, edge_weight = 
self.__neighbors_to_graph__(\n                neighbors, neighbor_weights, ppr_normalization,\n                device=edge_index.device)\n            edge_index = edge_index.to(torch.long)\n\n            if normalization == 'sym':\n                # We can change the normalization from row-normalized to\n                # symmetric by multiplying the resulting matrix with D^{1/2}\n                # from the left and D^{-1/2} from the right.\n                # Since we use the original degrees for this it will be like\n                # we had used symmetric normalization from the beginning\n                # (except for errors due to approximation).\n                row, col = edge_index\n                deg_inv = deg.sqrt()\n                deg_inv_sqrt = deg.pow(-0.5)\n                deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0\n                edge_weight = deg_inv[row] * edge_weight * deg_inv_sqrt[col]\n            elif normalization in ['col', 'row']:\n                pass\n            else:\n                raise ValueError(\n                    ('Transition matrix normalization {} not implemented for '\n                     'non-exact GDC computation.').format(normalization))\n\n        elif method == 'heat':\n            raise NotImplementedError(\n                ('Currently no fast heat kernel is implemented. You are '\n                 'welcome to create one yourself, e.g., based on '\n                 '\"Kloster and Gleich: Heat kernel based community detection '\n                 '(KDD 2014).\"'))\n        else:\n            raise ValueError(\n                'Approximate GDC diffusion {} unknown.'.format(method))\n\n        return edge_index, edge_weight\n\n    def sparsify_dense(self, matrix, method, **kwargs):\n        r\"\"\"Sparsifies the given dense matrix.\n        Args:\n            matrix (Tensor): Matrix to sparsify.\n            num_nodes (int): Number of nodes.\n            method (str): Method of sparsification. 
Options:\n                1. :obj:`\"threshold\"`: Remove all edges with weights smaller\n                   than :obj:`eps`.\n                   Additionally expects one of these parameters:\n                   - **eps** (*float*) - Threshold to bound edges at.\n                   - **avg_degree** (*int*) - If :obj:`eps` is not given,\n                     it can optionally be calculated by calculating the\n                     :obj:`eps` required to achieve a given :obj:`avg_degree`.\n                2. :obj:`\"topk\"`: Keep edges with top :obj:`k` edge weights per\n                   node (column).\n                   Additionally expects the following parameters:\n                   - **k** (*int*) - Specifies the number of edges to keep.\n                   - **dim** (*int*) - The axis along which to take the top\n                     :obj:`k`.\n        :rtype: (:class:`LongTensor`, :class:`Tensor`)\n        \"\"\"\n        assert matrix.shape[0] == matrix.shape[1]\n        N = matrix.shape[1]\n\n        if method == 'threshold':\n            if 'eps' not in kwargs.keys():\n                kwargs['eps'] = self.__calculate_eps__(matrix, N,\n                                                       kwargs['avg_degree'])\n\n            edge_index = torch.nonzero(matrix >= kwargs['eps']).t()\n            edge_index_flat = edge_index[0] * N + edge_index[1]\n            edge_weight = matrix.flatten()[edge_index_flat]\n\n        elif method == 'topk':\n            assert kwargs['dim'] in [0, 1]\n            sort_idx = torch.argsort(matrix, dim=kwargs['dim'],\n                                     descending=True)\n            if kwargs['dim'] == 0:\n                top_idx = sort_idx[:kwargs['k']]\n                edge_weight = torch.gather(matrix, dim=kwargs['dim'],\n                                           index=top_idx).flatten()\n\n                row_idx = torch.arange(0, N, device=matrix.device).repeat(\n                    kwargs['k'])\n                
edge_index = torch.stack([top_idx.flatten(), row_idx], dim=0)\n            else:\n                top_idx = sort_idx[:, :kwargs['k']]\n                edge_weight = torch.gather(matrix, dim=kwargs['dim'],\n                                           index=top_idx).flatten()\n\n                col_idx = torch.arange(\n                    0, N, device=matrix.device).repeat_interleave(kwargs['k'])\n                edge_index = torch.stack([col_idx, top_idx.flatten()], dim=0)\n        else:\n            raise ValueError('GDC sparsification {} unknown.'.format(method))\n\n        return edge_index, edge_weight\n\n    def sparsify_sparse(self, edge_index, edge_weight, num_nodes, method,\n                        **kwargs):\n        r\"\"\"Sparsifies a given sparse graph further.\n        Args:\n            edge_index (LongTensor): The edge indices.\n            edge_weight (Tensor): One-dimensional edge weights.\n            num_nodes (int): Number of nodes.\n            method (str): Method of sparsification:\n                1. 
:obj:`\"threshold\"`: Remove all edges with weights smaller\n                   than :obj:`eps`.\n                   Additionally expects one of these parameters:\n                   - **eps** (*float*) - Threshold to bound edges at.\n                   - **avg_degree** (*int*) - If :obj:`eps` is not given,\n                     it can optionally be calculated by calculating the\n                     :obj:`eps` required to achieve a given :obj:`avg_degree`.\n        :rtype: (:class:`LongTensor`, :class:`Tensor`)\n        \"\"\"\n        if method == 'threshold':\n            if 'eps' not in kwargs.keys():\n                kwargs['eps'] = self.__calculate_eps__(edge_weight, num_nodes,\n                                                       kwargs['avg_degree'])\n\n            remaining_edge_idx = torch.nonzero(\n                edge_weight >= kwargs['eps']).flatten()\n            edge_index = edge_index[:, remaining_edge_idx]\n            edge_weight = edge_weight[remaining_edge_idx]\n        elif method == 'topk':\n            raise NotImplementedError(\n                'Sparse topk sparsification not implemented.')\n        else:\n            raise ValueError('GDC sparsification {} unknown.'.format(method))\n\n        return edge_index, edge_weight\n\n    def __expm__(self, matrix, symmetric):\n        r\"\"\"Calculates matrix exponential.\n        Args:\n            matrix (Tensor): Matrix to take exponential of.\n            symmetric (bool): Specifies whether the matrix is symmetric.\n        :rtype: (:class:`Tensor`)\n        \"\"\"\n        if symmetric:\n            e, V = torch.symeig(matrix, eigenvectors=True)\n            diff_mat = V @ torch.diag(e.exp()) @ V.t()\n        else:\n            diff_mat_np = expm(matrix.cpu().numpy())\n            diff_mat = torch.Tensor(diff_mat_np).to(matrix.device)\n        return diff_mat\n\n    def __calculate_eps__(self, matrix, num_nodes, avg_degree):\n        r\"\"\"Calculates threshold necessary to achieve a given 
average degree.\n        Args:\n            matrix (Tensor): Adjacency matrix or edge weights.\n            num_nodes (int): Number of nodes.\n            avg_degree (int): Target average degree.\n        :rtype: (:class:`float`)\n        \"\"\"\n        sorted_edges = torch.sort(matrix.flatten(), descending=True).values\n        if avg_degree * num_nodes > len(sorted_edges):\n            return -np.inf\n        return sorted_edges[avg_degree * num_nodes - 1]\n\n    def __neighbors_to_graph__(self, neighbors, neighbor_weights,\n                               normalization='row', device='cpu'):\n        r\"\"\"Combine a list of neighbors and neighbor weights to create a sparse\n        graph.\n        Args:\n            neighbors (List[List[int]]): List of neighbors for each node.\n            neighbor_weights (List[List[float]]): List of weights for the\n                neighbors of each node.\n            normalization (str): Normalization of resulting matrix\n                (options: :obj:`\"row\"`, :obj:`\"col\"`). 
(default: :obj:`\"row\"`)\n            device (torch.device): Device to create output tensors on.\n                (default: :obj:`\"cpu\"`)\n        :rtype: (:class:`LongTensor`, :class:`Tensor`)\n        \"\"\"\n        edge_weight = torch.Tensor(np.concatenate(neighbor_weights)).to(device)\n        i = np.repeat(np.arange(len(neighbors)),\n                      np.fromiter(map(len, neighbors), dtype=int))\n        j = np.concatenate(neighbors)\n        if normalization == 'col':\n            edge_index = torch.Tensor(np.vstack([j, i])).to(device)\n            N = len(neighbors)\n            edge_index, edge_weight = coalesce(edge_index, edge_weight, N, N)\n        elif normalization == 'row':\n            edge_index = torch.Tensor(np.vstack([i, j])).to(device)\n        else:\n            raise ValueError(\n                f\"PPR matrix normalization {normalization} unknown.\")\n        return edge_index, edge_weight\n\n    @staticmethod\n    @jit()\n    def __calc_ppr__(indptr, indices, out_degree, alpha, eps):\n        r\"\"\"Calculate the personalized PageRank vector for all nodes\n        using a variant of the Andersen algorithm\n        (see Andersen et al. 
:Local Graph Partitioning using PageRank Vectors.)\n        Args:\n            indptr (np.ndarray): Index pointer for the sparse matrix\n                (CSR-format).\n            indices (np.ndarray): Indices of the sparse matrix entries\n                (CSR-format).\n            out_degree (np.ndarray): Out-degree of each node.\n            alpha (float): Alpha of the PageRank to calculate.\n            eps (float): Threshold for PPR calculation stopping criterion\n                (:obj:`edge_weight >= eps * out_degree`).\n        :rtype: (:class:`List[List[int]]`, :class:`List[List[float]]`)\n        \"\"\"\n        alpha_eps = alpha * eps\n        js = []\n        vals = []\n        for inode in range(len(out_degree)):\n            p = {inode: 0.0}\n            r = {}\n            r[inode] = alpha\n            q = [inode]\n            while len(q) > 0:\n                unode = q.pop()\n\n                res = r[unode] if unode in r else 0\n                if unode in p:\n                    p[unode] += res\n                else:\n                    p[unode] = res\n                r[unode] = 0\n                for vnode in indices[indptr[unode]:indptr[unode + 1]]:\n                    _val = (1 - alpha) * res / out_degree[unode]\n                    if vnode in r:\n                        r[vnode] += _val\n                    else:\n                        r[vnode] = _val\n\n                    res_vnode = r[vnode] if vnode in r else 0\n                    if res_vnode >= alpha_eps * out_degree[vnode]:\n                        if vnode not in q:\n                            q.append(vnode)\n            js.append(list(p.keys()))\n            vals.append(list(p.values()))\n        return js, vals\n\n    def __repr__(self):\n        return '{}()'.format(self.__class__.__name__)"
  },
  {
    "path": "imports/preprocess_data.py",
    "content": "# Copyright (c) 2019 Mwiza Kunda\n# Copyright (C) 2017 Sarah Parisot <s.parisot@imperial.ac.uk>, Sofia Ira Ktena <ira.ktena@imperial.ac.uk>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program.  If not, see <http://www.gnu.org/licenses/>.\n\n\nimport os\nimport warnings\nimport glob\nimport csv\nimport re\nimport numpy as np\nimport scipy.io as sio\nimport sys\nfrom nilearn import connectome\nimport pandas as pd\nfrom scipy.spatial import distance\nfrom scipy import signal\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.preprocessing import Normalizer\nfrom sklearn.preprocessing import OrdinalEncoder\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.preprocessing import StandardScaler\nwarnings.filterwarnings(\"ignore\")\n\n# Input data variables\n\nroot_folder = '/home/azureuser/projects/BrainGNN/data/'\ndata_folder = os.path.join(root_folder, 'ABIDE_pcp/cpac/filt_noglobal')\nphenotype = os.path.join(root_folder, 'ABIDE_pcp/Phenotypic_V1_0b_preprocessed1.csv')\n\n\ndef fetch_filenames(subject_IDs, file_type, atlas):\n    \"\"\"\n        subject_list : list of short subject IDs in string format\n        file_type    : must be one of the available file types\n        filemapping  : resulting file name format\n    returns:\n        filenames    : list of filetypes (same length as subject_list)\n    \"\"\"\n\n    filemapping = {'func_preproc': '_func_preproc.nii.gz',\n                 
  'rois_' + atlas: '_rois_' + atlas + '.1D'}\n    # The list to be filled\n    filenames = []\n\n    # Fill list with requested file paths\n    for i in range(len(subject_IDs)):\n        os.chdir(data_folder)\n        try:\n            try:\n                os.chdir(data_folder)\n                filenames.append(glob.glob('*' + subject_IDs[i] + filemapping[file_type])[0])\n            except:\n                os.chdir(data_folder + '/' + subject_IDs[i])\n                filenames.append(glob.glob('*' + subject_IDs[i] + filemapping[file_type])[0])\n        except IndexError:\n            filenames.append('N/A')\n    return filenames\n\n\n# Get timeseries arrays for list of subjects\ndef get_timeseries(subject_list, atlas_name, silence=False):\n    \"\"\"\n        subject_list : list of short subject IDs in string format\n        atlas_name   : the atlas based on which the timeseries are generated e.g. aal, cc200\n    returns:\n        time_series  : list of timeseries arrays, each of shape (timepoints x regions)\n    \"\"\"\n\n    timeseries = []\n    for i in range(len(subject_list)):\n        subject_folder = os.path.join(data_folder, subject_list[i])\n        ro_file = [f for f in os.listdir(subject_folder) if f.endswith('_rois_' + atlas_name + '.1D')]\n        fl = os.path.join(subject_folder, ro_file[0])\n        if silence != True:\n            print(\"Reading timeseries file %s\" % fl)\n        timeseries.append(np.loadtxt(fl, skiprows=0))\n\n    return timeseries\n\n\n#  compute connectivity matrices\ndef subject_connectivity(timeseries, subjects, atlas_name, kind, iter_no='', seed=1234,\n                         n_subjects='', save=True, save_path=data_folder):\n    \"\"\"\n        timeseries   : timeseries table for subject (timepoints x regions)\n        subjects     : subject IDs\n        atlas_name   : name of the parcellation atlas used\n        kind         : the kind of connectivity to be used, e.g. 
lasso, partial correlation, correlation\n        iter_no      : tangent connectivity iteration number for cross validation evaluation\n        save         : save the connectivity matrix to a file\n        save_path    : specify path to save the matrix if different from subject folder\n    returns:\n        connectivity : connectivity matrix (regions x regions)\n    \"\"\"\n\n    if kind in ['TPE', 'TE', 'correlation','partial correlation']:\n        if kind not in ['TPE', 'TE']:\n            conn_measure = connectome.ConnectivityMeasure(kind=kind)\n            connectivity = conn_measure.fit_transform(timeseries)\n        else:\n            if kind == 'TPE':\n                conn_measure = connectome.ConnectivityMeasure(kind='correlation')\n                conn_mat = conn_measure.fit_transform(timeseries)\n                conn_measure = connectome.ConnectivityMeasure(kind='tangent')\n                connectivity_fit = conn_measure.fit(conn_mat)\n                connectivity = connectivity_fit.transform(conn_mat)\n            else:\n                conn_measure = connectome.ConnectivityMeasure(kind='tangent')\n                connectivity_fit = conn_measure.fit(timeseries)\n                connectivity = connectivity_fit.transform(timeseries)\n\n    if save:\n        if kind not in ['TPE', 'TE']:\n            for i, subj_id in enumerate(subjects):\n                subject_file = os.path.join(save_path, subj_id,\n                                            subj_id + '_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')\n                sio.savemat(subject_file, {'connectivity': connectivity[i]})\n            return connectivity\n        else:\n            for i, subj_id in enumerate(subjects):\n                subject_file = os.path.join(save_path, subj_id,\n                                            subj_id + '_' + atlas_name + '_' + kind.replace(' ', '_') + '_' + str(\n                                                iter_no) + '_' + str(seed) + '_' + 
validation_ext + str(\n                                                n_subjects) + '.mat')\n                sio.savemat(subject_file, {'connectivity': connectivity[i]})\n            return connectivity_fit\n\n\n# Get the list of subject IDs\n\ndef get_ids(num_subjects=None):\n    \"\"\"\n    return:\n        subject_IDs    : list of all subject IDs\n    \"\"\"\n\n    subject_IDs = np.genfromtxt(os.path.join(data_folder, 'subject_IDs.txt'), dtype=str)\n\n    if num_subjects is not None:\n        subject_IDs = subject_IDs[:num_subjects]\n\n    return subject_IDs\n\n\n# Get phenotype values for a list of subjects\ndef get_subject_score(subject_list, score):\n    scores_dict = {}\n\n    with open(phenotype) as csv_file:\n        reader = csv.DictReader(csv_file)\n        for row in reader:\n            if row['SUB_ID'] in subject_list:\n                if score == 'HANDEDNESS_CATEGORY':\n                    if (row[score].strip() == '-9999') or (row[score].strip() == ''):\n                        scores_dict[row['SUB_ID']] = 'R'\n                    elif row[score] == 'Mixed':\n                        scores_dict[row['SUB_ID']] = 'Ambi'\n                    elif row[score] == 'L->R':\n                        scores_dict[row['SUB_ID']] = 'Ambi'\n                    else:\n                        scores_dict[row['SUB_ID']] = row[score]\n                elif (score == 'FIQ' or score == 'PIQ' or score == 'VIQ'):\n                    if (row[score].strip() == '-9999') or (row[score].strip() == ''):\n                        scores_dict[row['SUB_ID']] = 100\n                    else:\n                        scores_dict[row['SUB_ID']] = float(row[score])\n\n                else:\n                    scores_dict[row['SUB_ID']] = row[score]\n\n    return scores_dict\n\n\n# preprocess phenotypes. 
Categorical -> ordinal representation\ndef preprocess_phenotypes(pheno_ft, params):\n    if params['model'] == 'MIDA':\n        ct = ColumnTransformer([(\"ordinal\", OrdinalEncoder(), [0, 1, 2])], remainder='passthrough')\n    else:\n        ct = ColumnTransformer([(\"ordinal\", OrdinalEncoder(), [0, 1, 2, 3])], remainder='passthrough')\n\n    pheno_ft = ct.fit_transform(pheno_ft)\n    pheno_ft = pheno_ft.astype('float32')\n\n    return (pheno_ft)\n\n\n# create phenotype feature vector to concatenate with fmri feature vectors\ndef phenotype_ft_vector(pheno_ft, num_subjects, params):\n    gender = pheno_ft[:, 0]\n    if params['model'] == 'MIDA':\n        eye = pheno_ft[:, 0]\n        hand = pheno_ft[:, 2]\n        age = pheno_ft[:, 3]\n        fiq = pheno_ft[:, 4]\n    else:\n        eye = pheno_ft[:, 2]\n        hand = pheno_ft[:, 3]\n        age = pheno_ft[:, 4]\n        fiq = pheno_ft[:, 5]\n\n    phenotype_ft = np.zeros((num_subjects, 4))\n    phenotype_ft_eye = np.zeros((num_subjects, 2))\n    phenotype_ft_hand = np.zeros((num_subjects, 3))\n\n    for i in range(num_subjects):\n        phenotype_ft[i, int(gender[i])] = 1\n        phenotype_ft[i, -2] = age[i]\n        phenotype_ft[i, -1] = fiq[i]\n        phenotype_ft_eye[i, int(eye[i])] = 1\n        phenotype_ft_hand[i, int(hand[i])] = 1\n\n    if params['model'] == 'MIDA':\n        phenotype_ft = np.concatenate([phenotype_ft, phenotype_ft_hand], axis=1)\n    else:\n        phenotype_ft = np.concatenate([phenotype_ft, phenotype_ft_hand, phenotype_ft_eye], axis=1)\n\n    return phenotype_ft\n\n\n# Load precomputed fMRI connectivity networks\ndef get_networks(subject_list, kind, iter_no='', seed=1234, n_subjects='', atlas_name=\"aal\",\n                 variable='connectivity'):\n    \"\"\"\n        subject_list : list of subject IDs\n        kind         : the kind of connectivity to be used, e.g. 
lasso, partial correlation, correlation\n        atlas_name   : name of the parcellation atlas used\n        variable     : variable name in the .mat file that has been used to save the precomputed networks\n    return:\n        matrix      : feature matrix of connectivity networks (num_subjects x network_size)\n    \"\"\"\n\n    all_networks = []\n    for subject in subject_list:\n        if len(kind.split()) == 2:\n            kind = '_'.join(kind.split())\n        fl = os.path.join(data_folder, subject,\n                              subject + \"_\" + atlas_name + \"_\" + kind.replace(' ', '_') + \".mat\")\n\n\n        matrix = sio.loadmat(fl)[variable]\n        all_networks.append(matrix)\n\n    if kind in ['TE', 'TPE']:\n        norm_networks = [mat for mat in all_networks]\n    else:\n        norm_networks = [np.arctanh(mat) for mat in all_networks]\n\n    networks = np.stack(norm_networks)\n\n    return networks\n\n"
  },
  {
    "path": "imports/read_abide_stats_parall.py",
    "content": "'''\nAuthor: Xiaoxiao Li\nDate: 2019/02/24\n'''\n\nimport os.path as osp\nfrom os import listdir\nimport os\nimport glob\nimport h5py\n\nimport torch\nimport numpy as np\nfrom scipy.io import loadmat\nfrom torch_geometric.data import Data\nimport networkx as nx\nfrom networkx.convert_matrix import from_numpy_matrix\nimport multiprocessing\nfrom torch_sparse import coalesce\nfrom torch_geometric.utils import remove_self_loops\nfrom functools import partial\nimport deepdish as dd\nfrom imports.gdc import GDC\n\n\ndef split(data, batch):\n    node_slice = torch.cumsum(torch.from_numpy(np.bincount(batch)), 0)\n    node_slice = torch.cat([torch.tensor([0]), node_slice])\n\n    row, _ = data.edge_index\n    edge_slice = torch.cumsum(torch.from_numpy(np.bincount(batch[row])), 0)\n    edge_slice = torch.cat([torch.tensor([0]), edge_slice])\n\n    # Edge indices should start at zero for every graph.\n    data.edge_index -= node_slice[batch[row]].unsqueeze(0)\n\n    slices = {'edge_index': edge_slice}\n    if data.x is not None:\n        slices['x'] = node_slice\n    if data.edge_attr is not None:\n        slices['edge_attr'] = edge_slice\n    if data.y is not None:\n        if data.y.size(0) == batch.size(0):\n            slices['y'] = node_slice\n        else:\n            slices['y'] = torch.arange(0, batch[-1] + 2, dtype=torch.long)\n    if data.pos is not None:\n        slices['pos'] = node_slice\n\n    return data, slices\n\n\ndef cat(seq):\n    seq = [item for item in seq if item is not None]\n    seq = [item.unsqueeze(-1) if item.dim() == 1 else item for item in seq]\n    return torch.cat(seq, dim=-1).squeeze() if len(seq) > 0 else None\n\nclass NoDaemonProcess(multiprocessing.Process):\n    @property\n    def daemon(self):\n        return False\n\n    @daemon.setter\n    def daemon(self, value):\n        pass\n\n\nclass NoDaemonContext(type(multiprocessing.get_context())):\n    Process = NoDaemonProcess\n\n\ndef read_data(data_dir):\n    onlyfiles = 
[f for f in listdir(data_dir) if osp.isfile(osp.join(data_dir, f))]\n    onlyfiles.sort()\n    batch = []\n    pseudo = []\n    y_list = []\n    edge_att_list, edge_index_list,att_list = [], [], []\n\n    # parallar computing\n    cores = multiprocessing.cpu_count()\n    pool = multiprocessing.Pool(processes=cores)\n    #pool =  MyPool(processes = cores)\n    func = partial(read_sigle_data, data_dir)\n\n    import timeit\n\n    start = timeit.default_timer()\n\n    res = pool.map(func, onlyfiles)\n\n    pool.close()\n    pool.join()\n\n    stop = timeit.default_timer()\n\n    print('Time: ', stop - start)\n\n\n\n    for j in range(len(res)):\n        edge_att_list.append(res[j][0])\n        edge_index_list.append(res[j][1]+j*res[j][4])\n        att_list.append(res[j][2])\n        y_list.append(res[j][3])\n        batch.append([j]*res[j][4])\n        pseudo.append(np.diag(np.ones(res[j][4])))\n\n    edge_att_arr = np.concatenate(edge_att_list)\n    edge_index_arr = np.concatenate(edge_index_list, axis=1)\n    att_arr = np.concatenate(att_list, axis=0)\n    pseudo_arr = np.concatenate(pseudo, axis=0)\n    y_arr = np.stack(y_list)\n    edge_att_torch = torch.from_numpy(edge_att_arr.reshape(len(edge_att_arr), 1)).float()\n    att_torch = torch.from_numpy(att_arr).float()\n    y_torch = torch.from_numpy(y_arr).long()  # classification\n    batch_torch = torch.from_numpy(np.hstack(batch)).long()\n    edge_index_torch = torch.from_numpy(edge_index_arr).long()\n    pseudo_torch = torch.from_numpy(pseudo_arr).float()\n    data = Data(x=att_torch, edge_index=edge_index_torch, y=y_torch, edge_attr=edge_att_torch, pos = pseudo_torch )\n\n\n    data, slices = split(data, batch_torch)\n\n    return data, slices\n\n\ndef read_sigle_data(data_dir,filename,use_gdc =False):\n\n    temp = dd.io.load(osp.join(data_dir, filename))\n\n    # read edge and edge attribute\n    pcorr = np.abs(temp['pcorr'][()])\n\n    num_nodes = pcorr.shape[0]\n    G = from_numpy_matrix(pcorr)\n    A = 
nx.to_scipy_sparse_matrix(G)\n    adj = A.tocoo()\n    edge_att = np.zeros(len(adj.row))\n    for i in range(len(adj.row)):\n        edge_att[i] = pcorr[adj.row[i], adj.col[i]]\n\n    edge_index = np.stack([adj.row, adj.col])\n    edge_index, edge_att = remove_self_loops(torch.from_numpy(edge_index), torch.from_numpy(edge_att))\n    edge_index = edge_index.long()\n    edge_index, edge_att = coalesce(edge_index, edge_att, num_nodes,\n                                    num_nodes)\n    att = temp['corr'][()]\n    label = temp['label'][()]\n\n    att_torch = torch.from_numpy(att).float()\n    y_torch = torch.from_numpy(np.array(label)).long()  # classification\n\n    data = Data(x=att_torch, edge_index=edge_index.long(), y=y_torch, edge_attr=edge_att)\n\n    if use_gdc:\n        '''\n        Implementation of https://papers.nips.cc/paper/2019/hash/23c894276a2c5a16470e6a31f4618d73-Abstract.html\n        '''\n        data.edge_attr = data.edge_attr.squeeze()\n        gdc = GDC(self_loop_weight=1, normalization_in='sym',\n                  normalization_out='col',\n                  diffusion_kwargs=dict(method='ppr', alpha=0.2),\n                  sparsification_kwargs=dict(method='topk', k=20,\n                                             dim=0), exact=True)\n        data = gdc(data)\n        return data.edge_attr.data.numpy(),data.edge_index.data.numpy(),data.x.data.numpy(),data.y.data.item(),num_nodes\n\n    else:\n        return edge_att.data.numpy(),edge_index.data.numpy(),att,label,num_nodes\n\nif __name__ == \"__main__\":\n    data_dir = '/home/azureuser/projects/BrainGNN/data/ABIDE_pcp/cpac/filt_noglobal/raw'\n    filename = '50346.h5'\n    read_sigle_data(data_dir, filename)\n\n\n\n\n\n\n"
  },
  {
    "path": "imports/utils.py",
    "content": "from scipy import stats\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nfrom scipy.io import loadmat\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.model_selection import KFold\n\n\ndef train_val_test_split(kfold = 5, fold = 0):\n    n_sub = 1035\n    id = list(range(n_sub))\n\n\n    import random\n    random.seed(123)\n    random.shuffle(id)\n\n    kf = KFold(n_splits=kfold, random_state=123,shuffle = True)\n    kf2 = KFold(n_splits=kfold-1, shuffle=True, random_state = 666)\n\n\n    test_index = list()\n    train_index = list()\n    val_index = list()\n\n    for tr,te in kf.split(np.array(id)):\n        test_index.append(te)\n        tr_id, val_id = list(kf2.split(tr))[0]\n        train_index.append(tr[tr_id])\n        val_index.append(tr[val_id])\n\n    train_id = train_index[fold]\n    test_id = test_index[fold]\n    val_id = val_index[fold]\n\n    return train_id,val_id,test_id"
  },
  {
    "path": "net/braingnn.py",
    "content": "import torch\nimport torch.nn.functional as F\nimport torch.nn as nn\nfrom torch_geometric.nn import TopKPooling\nfrom torch_geometric.nn import global_mean_pool as gap, global_max_pool as gmp\nfrom torch_geometric.utils import (add_self_loops, sort_edge_index,\n                                   remove_self_loops)\nfrom torch_sparse import spspmm\n\nfrom net.braingraphconv import MyNNConv\n\n\n##########################################################################################################################\nclass Network(torch.nn.Module):\n    def __init__(self, indim, ratio, nclass, k=8, R=200):\n        '''\n\n        :param indim: (int) node feature dimension\n        :param ratio: (float) pooling ratio in (0,1)\n        :param nclass: (int)  number of classes\n        :param k: (int) number of communities\n        :param R: (int) number of ROIs\n        '''\n        super(Network, self).__init__()\n\n        self.indim = indim\n        self.dim1 = 32\n        self.dim2 = 32\n        self.dim3 = 512\n        self.dim4 = 256\n        self.dim5 = 8\n        self.k = k\n        self.R = R\n\n        self.n1 = nn.Sequential(nn.Linear(self.R, self.k, bias=False), nn.ReLU(), nn.Linear(self.k, self.dim1 * self.indim))\n        self.conv1 = MyNNConv(self.indim, self.dim1, self.n1, normalize=False)\n        self.pool1 = TopKPooling(self.dim1, ratio=ratio, multiplier=1, nonlinearity=torch.sigmoid)\n        self.n2 = nn.Sequential(nn.Linear(self.R, self.k, bias=False), nn.ReLU(), nn.Linear(self.k, self.dim2 * self.dim1))\n        self.conv2 = MyNNConv(self.dim1, self.dim2, self.n2, normalize=False)\n        self.pool2 = TopKPooling(self.dim2, ratio=ratio, multiplier=1, nonlinearity=torch.sigmoid)\n\n        #self.fc1 = torch.nn.Linear((self.dim2) * 2, self.dim2)\n        self.fc1 = torch.nn.Linear((self.dim1+self.dim2)*2, self.dim2)\n        self.bn1 = torch.nn.BatchNorm1d(self.dim2)\n        self.fc2 = torch.nn.Linear(self.dim2, self.dim3)\n       
 self.bn2 = torch.nn.BatchNorm1d(self.dim3)\n        self.fc3 = torch.nn.Linear(self.dim3, nclass)\n\n\n\n\n    def forward(self, x, edge_index, batch, edge_attr, pos):\n\n        x = self.conv1(x, edge_index, edge_attr, pos)\n        x, edge_index, edge_attr, batch, perm, score1 = self.pool1(x, edge_index, edge_attr, batch)\n\n        pos = pos[perm]\n        x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)\n\n        edge_attr = edge_attr.squeeze()\n        edge_index, edge_attr = self.augment_adj(edge_index, edge_attr, x.size(0))\n\n        x = self.conv2(x, edge_index, edge_attr, pos)\n        x, edge_index, edge_attr, batch, perm, score2 = self.pool2(x, edge_index,edge_attr, batch)\n\n        x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)\n\n        x = torch.cat([x1,x2], dim=1)\n        x = self.bn1(F.relu(self.fc1(x)))\n        x = F.dropout(x, p=0.5, training=self.training)\n        x = self.bn2(F.relu(self.fc2(x)))\n        x= F.dropout(x, p=0.5, training=self.training)\n        x = F.log_softmax(self.fc3(x), dim=-1)\n\n        return x,self.pool1.weight,self.pool2.weight, torch.sigmoid(score1).view(x.size(0),-1), torch.sigmoid(score2).view(x.size(0),-1)\n\n    def augment_adj(self, edge_index, edge_weight, num_nodes):\n        edge_index, edge_weight = add_self_loops(edge_index, edge_weight,\n                                                 num_nodes=num_nodes)\n        edge_index, edge_weight = sort_edge_index(edge_index, edge_weight,\n                                                  num_nodes)\n        edge_index, edge_weight = spspmm(edge_index, edge_weight, edge_index,\n                                         edge_weight, num_nodes, num_nodes,\n                                         num_nodes)\n        edge_index, edge_weight = remove_self_loops(edge_index, edge_weight)\n        return edge_index, edge_weight\n\n"
  },
  {
    "path": "net/braingraphconv.py",
    "content": "import torch\nimport torch.nn.functional as F\nfrom torch.nn import Parameter\nfrom net.brainmsgpassing import MyMessagePassing\nfrom torch_geometric.utils import add_remaining_self_loops,softmax\n\nfrom torch_geometric.typing import (OptTensor)\n\nfrom net.inits import uniform\n\n\nclass MyNNConv(MyMessagePassing):\n    def __init__(self, in_channels, out_channels, nn, normalize=False, bias=True,\n                 **kwargs):\n        super(MyNNConv, self).__init__(aggr='mean', **kwargs)\n\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n        self.normalize = normalize\n        self.nn = nn\n        #self.weight = Parameter(torch.Tensor(self.in_channels, out_channels))\n\n        if bias:\n            self.bias = Parameter(torch.Tensor(out_channels))\n        else:\n            self.register_parameter('bias', None)\n\n        self.reset_parameters()\n\n    def reset_parameters(self):\n#        uniform(self.in_channels, self.weight)\n        uniform(self.in_channels, self.bias)\n\n    def forward(self, x, edge_index, edge_weight=None, pseudo= None, size=None):\n        \"\"\"\"\"\"\n        edge_weight = edge_weight.squeeze()\n        if size is None and torch.is_tensor(x):\n            edge_index, edge_weight = add_remaining_self_loops(\n                edge_index, edge_weight, 1, x.size(0))\n\n        weight = self.nn(pseudo).view(-1, self.in_channels, self.out_channels)\n        if torch.is_tensor(x):\n            x = torch.matmul(x.unsqueeze(1), weight).squeeze(1)\n        else:\n            x = (None if x[0] is None else torch.matmul(x[0].unsqueeze(1), weight).squeeze(1),\n                 None if x[1] is None else torch.matmul(x[1].unsqueeze(1), weight).squeeze(1))\n\n        # weight = self.nn(pseudo).view(-1, self.out_channels,self.in_channels)\n        # if torch.is_tensor(x):\n        #     x = torch.matmul(x.unsqueeze(1), weight.permute(0,2,1)).squeeze(1)\n        # else:\n        #     x = (None if 
x[0] is None else torch.matmul(x[0].unsqueeze(1), weight).squeeze(1),\n        #          None if x[1] is None else torch.matmul(x[1].unsqueeze(1), weight).squeeze(1))\n\n        return self.propagate(edge_index, size=size, x=x,\n                              edge_weight=edge_weight)\n\n    def message(self, edge_index_i, size_i, x_j, edge_weight, ptr: OptTensor):\n        edge_weight = softmax(edge_weight, edge_index_i, ptr, size_i)\n        return x_j if edge_weight is None else edge_weight.view(-1, 1) * x_j\n\n    def update(self, aggr_out):\n        if self.bias is not None:\n            aggr_out = aggr_out + self.bias\n        if self.normalize:\n            aggr_out = F.normalize(aggr_out, p=2, dim=-1)\n        return aggr_out\n\n    def __repr__(self):\n        return '{}({}, {})'.format(self.__class__.__name__, self.in_channels,\n                                   self.out_channels)\n\n"
  },
  {
    "path": "net/brainmsgpassing.py",
    "content": "import sys\nimport inspect\n\nimport torch\n# from torch_geometric.utils import scatter_\nfrom torch_scatter import scatter,scatter_add\n\nspecial_args = [\n    'edge_index', 'edge_index_i', 'edge_index_j', 'size', 'size_i', 'size_j'\n]\n__size_error_msg__ = ('All tensors which should get mapped to the same source '\n                      'or target nodes must be of same size in dimension 0.')\n\nis_python2 = sys.version_info[0] < 3\ngetargspec = inspect.getargspec if is_python2 else inspect.getfullargspec\n\n\nclass MyMessagePassing(torch.nn.Module):\n    r\"\"\"Base class for creating message passing layers\n    .. math::\n        \\mathbf{x}_i^{\\prime} = \\gamma_{\\mathbf{\\Theta}} \\left( \\mathbf{x}_i,\n        \\square_{j \\in \\mathcal{N}(i)} \\, \\phi_{\\mathbf{\\Theta}}\n        \\left(\\mathbf{x}_i, \\mathbf{x}_j,\\mathbf{e}_{i,j}\\right) \\right),\n    where :math:`\\square` denotes a differentiable, permutation invariant\n    function, *e.g.*, sum, mean or max, and :math:`\\gamma_{\\mathbf{\\Theta}}`\n    and :math:`\\phi_{\\mathbf{\\Theta}}` denote differentiable functions such as\n    MLPs.\n    See `here <https://pytorch-geometric.readthedocs.io/en/latest/notes/\n    create_gnn.html>`__ for the accompanying tutorial.\n    Args:\n        aggr (string, optional): The aggregation scheme to use\n            (:obj:`\"add\"`, :obj:`\"mean\"` or :obj:`\"max\"`).\n            (default: :obj:`\"add\"`)\n        flow (string, optional): The flow direction of message passing\n            (:obj:`\"source_to_target\"` or :obj:`\"target_to_source\"`).\n            (default: :obj:`\"source_to_target\"`)\n        node_dim (int, optional): The axis along which to propagate.\n            (default: :obj:`0`)\n    \"\"\"\n    def __init__(self, aggr='add', flow='source_to_target', node_dim=0):\n        super(MyMessagePassing, self).__init__()\n\n        self.aggr = aggr\n        assert self.aggr in ['add', 'mean', 'max']\n\n        self.flow = flow\n    
    assert self.flow in ['source_to_target', 'target_to_source']\n\n        self.node_dim = node_dim\n        assert self.node_dim >= 0\n\n        self.__message_args__ = getargspec(self.message)[0][1:]\n        self.__special_args__ = [(i, arg)\n                                 for i, arg in enumerate(self.__message_args__)\n                                 if arg in special_args]\n        self.__message_args__ = [\n            arg for arg in self.__message_args__ if arg not in special_args\n        ]\n        self.__update_args__ = getargspec(self.update)[0][2:]\n\n    def propagate(self, edge_index, size=None, **kwargs):\n        r\"\"\"The initial call to start propagating messages.\n        Args:\n            edge_index (Tensor): The indices of a general (sparse) assignment\n                matrix with shape :obj:`[N, M]` (can be directed or\n                undirected).\n            size (list or tuple, optional): The size :obj:`[N, M]` of the\n                assignment matrix. If set to :obj:`None`, the size is tried to\n                get automatically inferred and assumed to be symmetric.\n                (default: :obj:`None`)\n            **kwargs: Any additional data which is needed to construct messages\n                and to update node embeddings.\n        \"\"\"\n\n        dim = self.node_dim\n        size = [None, None] if size is None else list(size)\n        assert len(size) == 2\n\n        i, j = (0, 1) if self.flow == 'target_to_source' else (1, 0)\n        ij = {\"_i\": i, \"_j\": j}\n\n        message_args = []\n        for arg in self.__message_args__:\n            if arg[-2:] in ij.keys():\n                tmp = kwargs.get(arg[:-2], None)\n                if tmp is None:  # pragma: no cover\n                    message_args.append(tmp)\n                else:\n                    idx = ij[arg[-2:]]\n                    if isinstance(tmp, tuple) or isinstance(tmp, list):\n                        assert len(tmp) == 2\n                       
 if tmp[1 - idx] is not None:\n                            if size[1 - idx] is None:\n                                size[1 - idx] = tmp[1 - idx].size(dim)\n                            if size[1 - idx] != tmp[1 - idx].size(dim):\n                                raise ValueError(__size_error_msg__)\n                        tmp = tmp[idx]\n\n                    if tmp is None:\n                        message_args.append(tmp)\n                    else:\n                        if size[idx] is None:\n                            size[idx] = tmp.size(dim)\n                        if size[idx] != tmp.size(dim):\n                            raise ValueError(__size_error_msg__)\n\n                        tmp = torch.index_select(tmp, dim, edge_index[idx])\n                        message_args.append(tmp)\n            else:\n                message_args.append(kwargs.get(arg, None))\n\n        size[0] = size[1] if size[0] is None else size[0]\n        size[1] = size[0] if size[1] is None else size[1]\n\n        kwargs['edge_index'] = edge_index\n        kwargs['size'] = size\n\n        for (idx, arg) in self.__special_args__:\n            if arg[-2:] in ij.keys():\n                message_args.insert(idx, kwargs[arg[:-2]][ij[arg[-2:]]])\n            else:\n                message_args.insert(idx, kwargs[arg])\n\n        update_args = [kwargs[arg] for arg in self.__update_args__]\n\n        out = self.message(*message_args)\n        # out = scatter_(self.aggr, out, edge_index[i], dim, dim_size=size[i])\n        out = scatter_add(out, edge_index[i], dim, dim_size=size[i])\n        out = self.update(out, *update_args)\n\n        return out\n\n    def message(self, x_j):  # pragma: no cover\n        r\"\"\"Constructs messages to node :math:`i` in analogy to\n        :math:`\\phi_{\\mathbf{\\Theta}}` for each edge in\n        :math:`(j,i) \\in \\mathcal{E}` if :obj:`flow=\"source_to_target\"` and\n        :math:`(i,j) \\in \\mathcal{E}` if :obj:`flow=\"target_to_source\"`.\n    
    Can take any argument which was initially passed to :meth:`propagate`.\n        In addition, tensors passed to :meth:`propagate` can be mapped to the\n        respective nodes :math:`i` and :math:`j` by appending :obj:`_i` or\n        :obj:`_j` to the variable name, *.e.g.* :obj:`x_i` and :obj:`x_j`.\n        \"\"\"\n\n        return x_j\n\n    def update(self, aggr_out):  # pragma: no cover\n        r\"\"\"Updates node embeddings in analogy to\n        :math:`\\gamma_{\\mathbf{\\Theta}}` for each node\n        :math:`i \\in \\mathcal{V}`.\n        Takes in the output of aggregation as first argument and any argument\n        which was initially passed to :meth:`propagate`.\"\"\"\n\n        return aggr_out\n"
  },
  {
    "path": "net/inits.py",
    "content": "import math\n\n\ndef uniform(size, tensor):\n    bound = 1.0 / math.sqrt(size)\n    if tensor is not None:\n        tensor.data.uniform_(-bound, bound)\n\n\ndef kaiming_uniform(tensor, fan, a):\n    if tensor is not None:\n        bound = math.sqrt(6 / ((1 + a**2) * fan))\n        tensor.data.uniform_(-bound, bound)\n\n\ndef glorot(tensor):\n    if tensor is not None:\n        stdv = math.sqrt(6.0 / (tensor.size(-2) + tensor.size(-1)))\n        tensor.data.uniform_(-stdv, stdv)\n\n\ndef zeros(tensor):\n    if tensor is not None:\n        tensor.data.fill_(0)\n\n\ndef ones(tensor):\n    if tensor is not None:\n        tensor.data.fill_(1)"
  },
  {
    "path": "requirements.txt",
    "content": "alabaster @ file:///home/ktietz/src/ci/alabaster_1611921544520/work\nanaconda-client==1.7.2\nanaconda-project @ file:///tmp/build/80754af9/anaconda-project_1610472525955/work\nanyio @ file:///tmp/build/80754af9/anyio_1617783275907/work/dist\nappdirs==1.4.4\nargh==0.26.2\nargon2-cffi @ file:///tmp/build/80754af9/argon2-cffi_1613037097816/work\narrow==0.13.1\nase==3.21.1\nasn1crypto @ file:///tmp/build/80754af9/asn1crypto_1596577642040/work\nastroid @ file:///tmp/build/80754af9/astroid_1613500854201/work\nastropy @ file:///tmp/build/80754af9/astropy_1617745353437/work\nasync-generator @ file:///home/ktietz/src/ci/async_generator_1611927993394/work\natomicwrites==1.4.0\nattrs @ file:///tmp/build/80754af9/attrs_1604765588209/work\nautopep8 @ file:///tmp/build/80754af9/autopep8_1615918855173/work\nBabel @ file:///tmp/build/80754af9/babel_1607110387436/work\nbackcall @ file:///home/ktietz/src/ci/backcall_1611930011877/work\nbackports.shutil-get-terminal-size @ file:///tmp/build/80754af9/backports.shutil_get_terminal_size_1608222128777/work\nbeautifulsoup4 @ file:///home/linux1/recipes/ci/beautifulsoup4_1610988766420/work\nbinaryornot @ file:///tmp/build/80754af9/binaryornot_1617751525010/work\nbitarray @ file:///tmp/build/80754af9/bitarray_1618431750766/work\nbkcharts==0.2\nblack==19.10b0\nbleach @ file:///tmp/build/80754af9/bleach_1612211392645/work\nbokeh @ file:///tmp/build/80754af9/bokeh_1617824541184/work\nboto==2.49.0\nBottleneck==1.3.2\nbrotlipy==0.7.0\ncertifi==2020.12.5\ncffi @ file:///tmp/build/80754af9/cffi_1613246945912/work\nchardet @ file:///tmp/build/80754af9/chardet_1607706746162/work\nclick @ file:///home/linux1/recipes/ci/click_1610990599742/work\ncloudpickle @ file:///tmp/build/80754af9/cloudpickle_1598884132938/work\nclyent==1.2.2\ncolorama @ file:///tmp/build/80754af9/colorama_1607707115595/work\ncontextlib2==0.6.0.post1\ncookiecutter @ file:///tmp/build/80754af9/cookiecutter_1617748928239/work\ncryptography @ 
file:///tmp/build/80754af9/cryptography_1616769286105/work\ncycler==0.10.0\nCython @ file:///tmp/build/80754af9/cython_1618435160151/work\ncytoolz==0.11.0\ndask @ file:///tmp/build/80754af9/dask-core_1617390489108/work\ndecorator @ file:///tmp/build/80754af9/decorator_1617916966915/work\ndeepdish==0.3.6\ndefusedxml @ file:///tmp/build/80754af9/defusedxml_1615228127516/work\ndiff-match-patch @ file:///tmp/build/80754af9/diff-match-patch_1594828741838/work\ndistributed @ file:///tmp/build/80754af9/distributed_1617381497899/work\ndocutils @ file:///tmp/build/80754af9/docutils_1617624660125/work\nentrypoints==0.3\net-xmlfile==1.0.1\nfastcache==1.1.0\nfilelock @ file:///home/linux1/recipes/ci/filelock_1610993975404/work\nflake8 @ file:///tmp/build/80754af9/flake8_1615834841867/work\nFlask @ file:///home/ktietz/src/ci/flask_1611932660458/work\nfsspec @ file:///tmp/build/80754af9/fsspec_1617959894824/work\nfuture==0.18.2\ngevent @ file:///tmp/build/80754af9/gevent_1616770671827/work\nglob2 @ file:///home/linux1/recipes/ci/glob2_1610991677669/work\ngmpy2==2.0.8\ngoogledrivedownloader==0.4\ngreenlet @ file:///tmp/build/80754af9/greenlet_1611957705398/work\nh5py==2.10.0\nHeapDict==1.0.1\nhtml5lib @ file:///tmp/build/80754af9/html5lib_1593446221756/work\nidna @ file:///home/linux1/recipes/ci/idna_1610986105248/work\nimageio @ file:///tmp/build/80754af9/imageio_1617700267927/work\nimagesize @ file:///home/ktietz/src/ci/imagesize_1611921604382/work\nimportlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1617874469820/work\ninflection==0.5.1\niniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work\nintervaltree @ file:///tmp/build/80754af9/intervaltree_1598376443606/work\nipykernel @ file:///tmp/build/80754af9/ipykernel_1596207638929/work/dist/ipykernel-5.3.4-py3-none-any.whl\nipython @ file:///tmp/build/80754af9/ipython_1617120885885/work\nipython-genutils @ file:///tmp/build/80754af9/ipython_genutils_1606773439826/work\nipywidgets @ 
file:///tmp/build/80754af9/ipywidgets_1610481889018/work\nisodate==0.6.0\nisort @ file:///tmp/build/80754af9/isort_1616355431277/work\nitsdangerous @ file:///home/ktietz/src/ci/itsdangerous_1611932585308/work\njdcal==1.4.1\njedi @ file:///tmp/build/80754af9/jedi_1606932564285/work\njeepney @ file:///tmp/build/80754af9/jeepney_1606148855031/work\nJinja2 @ file:///tmp/build/80754af9/jinja2_1612213139570/work\njinja2-time @ file:///tmp/build/80754af9/jinja2-time_1617751524098/work\njoblib @ file:///tmp/build/80754af9/joblib_1613502643832/work\njson5==0.9.5\njsonschema @ file:///tmp/build/80754af9/jsonschema_1602607155483/work\njupyter==1.0.0\njupyter-client @ file:///tmp/build/80754af9/jupyter_client_1616770841739/work\njupyter-console @ file:///tmp/build/80754af9/jupyter_console_1616615302928/work\njupyter-core @ file:///tmp/build/80754af9/jupyter_core_1612213311222/work\njupyter-packaging @ file:///tmp/build/80754af9/jupyter-packaging_1613502826984/work\njupyter-server @ file:///tmp/build/80754af9/jupyter_server_1616083640759/work\njupyterlab @ file:///tmp/build/80754af9/jupyterlab_1619133235951/work\njupyterlab-pygments @ file:///tmp/build/80754af9/jupyterlab_pygments_1601490720602/work\njupyterlab-server @ file:///tmp/build/80754af9/jupyterlab_server_1617134334258/work\njupyterlab-widgets @ file:///tmp/build/80754af9/jupyterlab_widgets_1609884341231/work\nkeyring @ file:///tmp/build/80754af9/keyring_1614616740399/work\nkiwisolver @ file:///tmp/build/80754af9/kiwisolver_1612282420641/work\nlazy-object-proxy @ file:///tmp/build/80754af9/lazy-object-proxy_1616526917483/work\nlibarchive-c @ file:///tmp/build/80754af9/python-libarchive-c_1617780486945/work\nllvmlite==0.36.0\nlocket==0.2.1\nlxml @ file:///tmp/build/80754af9/lxml_1616443220220/work\nMarkupSafe==1.1.1\nmatplotlib @ file:///tmp/build/80754af9/matplotlib-suite_1613407855456/work\nmccabe==0.6.1\nmistune==0.8.4\nmkl-fft==1.3.0\nmkl-random @ 
file:///tmp/build/80754af9/mkl_random_1618853849286/work\nmkl-service==2.3.0\nmock @ file:///tmp/build/80754af9/mock_1607622725907/work\nmore-itertools @ file:///tmp/build/80754af9/more-itertools_1613676688952/work\nmpmath==1.2.1\nmsgpack @ file:///tmp/build/80754af9/msgpack-python_1612287151062/work\nmultipledispatch==0.6.0\nmypy-extensions==0.4.3\nnbclassic @ file:///tmp/build/80754af9/nbclassic_1616085367084/work\nnbclient @ file:///tmp/build/80754af9/nbclient_1614364831625/work\nnbconvert @ file:///tmp/build/80754af9/nbconvert_1601914830498/work\nnbformat @ file:///tmp/build/80754af9/nbformat_1617383369282/work\nnest-asyncio @ file:///tmp/build/80754af9/nest-asyncio_1613680548246/work\nnetworkx @ file:///tmp/build/80754af9/networkx_1598376031484/work\nnibabel==3.2.1\nnilearn==0.7.1\nnltk @ file:///tmp/build/80754af9/nltk_1618327084230/work\nnose @ file:///tmp/build/80754af9/nose_1606773131901/work\nnotebook @ file:///tmp/build/80754af9/notebook_1616443462982/work\nnumba @ file:///tmp/build/80754af9/numba_1616774046117/work\nnumexpr @ file:///tmp/build/80754af9/numexpr_1618856167419/work\nnumpy @ file:///tmp/build/80754af9/numpy_and_numpy_base_1618497241363/work\nnumpydoc @ file:///tmp/build/80754af9/numpydoc_1605117425582/work\nolefile==0.46\nopenpyxl @ file:///tmp/build/80754af9/openpyxl_1615411699337/work\npackaging @ file:///tmp/build/80754af9/packaging_1611952188834/work\npandas==1.2.4\npandocfilters @ file:///tmp/build/80754af9/pandocfilters_1605120460739/work\nparso==0.7.0\npartd @ file:///tmp/build/80754af9/partd_1618000087440/work\npath @ file:///tmp/build/80754af9/path_1614022220526/work\npathlib2 @ file:///tmp/build/80754af9/pathlib2_1607024983162/work\npathspec==0.7.0\npathtools==0.1.2\npatsy==0.5.1\npep8==1.7.1\npexpect @ file:///tmp/build/80754af9/pexpect_1605563209008/work\npickleshare @ file:///tmp/build/80754af9/pickleshare_1606932040724/work\nPillow @ file:///tmp/build/80754af9/pillow_1617383569452/work\npkginfo==1.7.0\npluggy @ 
file:///tmp/build/80754af9/pluggy_1615976321666/work\nply==3.11\npoyo @ file:///tmp/build/80754af9/poyo_1617751526755/work\nprometheus-client @ file:///tmp/build/80754af9/prometheus_client_1618088486455/work\nprompt-toolkit @ file:///tmp/build/80754af9/prompt-toolkit_1616415428029/work\nprotobuf==3.17.0\npsutil @ file:///tmp/build/80754af9/psutil_1612298023621/work\nptyprocess @ file:///tmp/build/80754af9/ptyprocess_1609355006118/work/dist/ptyprocess-0.7.0-py2.py3-none-any.whl\npy @ file:///tmp/build/80754af9/py_1607971587848/work\npycodestyle @ file:///home/ktietz/src/ci_mi/pycodestyle_1612807597675/work\npycosat==0.6.3\npycparser @ file:///tmp/build/80754af9/pycparser_1594388511720/work\npycurl==7.43.0.6\npydocstyle @ file:///tmp/build/80754af9/pydocstyle_1616182067796/work\npyerfa @ file:///tmp/build/80754af9/pyerfa_1619390903914/work\npyflakes @ file:///home/ktietz/src/ci_ipy2/pyflakes_1612551159640/work\nPygments @ file:///tmp/build/80754af9/pygments_1615143339740/work\npylint @ file:///tmp/build/80754af9/pylint_1617135829881/work\npyls-black @ file:///tmp/build/80754af9/pyls-black_1607553132291/work\npyls-spyder @ file:///tmp/build/80754af9/pyls-spyder_1613849700860/work\npyodbc===4.0.0-unsupported\npyOpenSSL @ file:///tmp/build/80754af9/pyopenssl_1608057966937/work\npyparsing @ file:///home/linux1/recipes/ci/pyparsing_1610983426697/work\npyrsistent @ file:///tmp/build/80754af9/pyrsistent_1600141720057/work\nPySocks @ file:///tmp/build/80754af9/pysocks_1605305779399/work\npytest==6.2.3\npython-dateutil @ file:///home/ktietz/src/ci/python-dateutil_1611928101742/work\npython-jsonrpc-server @ file:///tmp/build/80754af9/python-jsonrpc-server_1600278539111/work\npython-language-server @ file:///tmp/build/80754af9/python-language-server_1607972495879/work\npython-louvain==0.15\npython-slugify @ file:///tmp/build/80754af9/python-slugify_1620405669636/work\npytz @ file:///tmp/build/80754af9/pytz_1612215392582/work\nPyWavelets @ 
file:///tmp/build/80754af9/pywavelets_1601658317819/work\npyxdg @ file:///tmp/build/80754af9/pyxdg_1603822279816/work\nPyYAML==5.4.1\npyzmq==20.0.0\nQDarkStyle @ file:///tmp/build/80754af9/qdarkstyle_1617386714626/work\nqstylizer @ file:///tmp/build/80754af9/qstylizer_1617713584600/work/dist/qstylizer-0.1.10-py2.py3-none-any.whl\nQtAwesome @ file:///tmp/build/80754af9/qtawesome_1615991616277/work\nqtconsole @ file:///tmp/build/80754af9/qtconsole_1616775094278/work\nQtPy==1.9.0\nrdflib==5.0.0\nregex @ file:///tmp/build/80754af9/regex_1617569202463/work\nrequests @ file:///tmp/build/80754af9/requests_1608241421344/work\nrope @ file:///tmp/build/80754af9/rope_1602264064449/work\nRtree @ file:///tmp/build/80754af9/rtree_1618420845272/work\nruamel-yaml-conda @ file:///tmp/build/80754af9/ruamel_yaml_1616016699510/work\nscikit-image==0.16.2\nscikit-learn @ file:///tmp/build/80754af9/scikit-learn_1614446682169/work\nscipy @ file:///tmp/build/80754af9/scipy_1618855647378/work\nseaborn @ file:///tmp/build/80754af9/seaborn_1608578541026/work\nSecretStorage @ file:///tmp/build/80754af9/secretstorage_1614022784285/work\nSend2Trash @ file:///tmp/build/80754af9/send2trash_1607525499227/work\nsimplegeneric==0.8.1\nsingledispatch @ file:///tmp/build/80754af9/singledispatch_1614366001199/work\nsip==4.19.13\nsix @ file:///tmp/build/80754af9/six_1605205327372/work\nsniffio @ file:///tmp/build/80754af9/sniffio_1614030475067/work\nsnowballstemmer @ file:///tmp/build/80754af9/snowballstemmer_1611258885636/work\nsortedcollections @ file:///tmp/build/80754af9/sortedcollections_1611172717284/work\nsortedcontainers @ file:///tmp/build/80754af9/sortedcontainers_1606865132123/work\nsoupsieve @ file:///tmp/build/80754af9/soupsieve_1616183228191/work\nSphinx @ file:///tmp/build/80754af9/sphinx_1616268783226/work\nsphinxcontrib-applehelp @ file:///home/ktietz/src/ci/sphinxcontrib-applehelp_1611920841464/work\nsphinxcontrib-devhelp @ 
file:///home/ktietz/src/ci/sphinxcontrib-devhelp_1611920923094/work\nsphinxcontrib-htmlhelp @ file:///home/ktietz/src/ci/sphinxcontrib-htmlhelp_1611920974801/work\nsphinxcontrib-jsmath @ file:///home/ktietz/src/ci/sphinxcontrib-jsmath_1611920942228/work\nsphinxcontrib-qthelp @ file:///home/ktietz/src/ci/sphinxcontrib-qthelp_1611921055322/work\nsphinxcontrib-serializinghtml @ file:///home/ktietz/src/ci/sphinxcontrib-serializinghtml_1611920755253/work\nsphinxcontrib-websupport @ file:///tmp/build/80754af9/sphinxcontrib-websupport_1597081412696/work\nspyder @ file:///tmp/build/80754af9/spyder_1618327905127/work\nspyder-kernels @ file:///tmp/build/80754af9/spyder-kernels_1617396566288/work\nSQLAlchemy @ file:///tmp/build/80754af9/sqlalchemy_1618089170652/work\nstatsmodels @ file:///tmp/build/80754af9/statsmodels_1614023746358/work\nsympy @ file:///tmp/build/80754af9/sympy_1618252284338/work\ntables==3.6.1\ntblib @ file:///tmp/build/80754af9/tblib_1597928476713/work\ntensorboardX==2.2\nterminado==0.9.4\ntestpath @ file:///home/ktietz/src/ci/testpath_1611930608132/work\ntext-unidecode==1.3\ntextdistance @ file:///tmp/build/80754af9/textdistance_1612461398012/work\nthreadpoolctl @ file:///tmp/tmp9twdgx9k/threadpoolctl-2.1.0-py3-none-any.whl\nthree-merge @ file:///tmp/build/80754af9/three-merge_1607553261110/work\ntinycss @ file:///tmp/build/80754af9/tinycss_1617713798712/work\ntoml @ file:///tmp/build/80754af9/toml_1616166611790/work\ntoolz @ file:///home/linux1/recipes/ci/toolz_1610987900194/work\ntorch==1.7.0\ntorch-cluster==1.5.9\ntorch-geometric==1.7.0\ntorch-scatter==2.0.6\ntorch-sparse==0.6.9\ntorch-spline-conv==1.2.1\ntorchaudio==0.7.0a0+ac17b64\ntorchvision==0.8.0\ntornado @ file:///tmp/build/80754af9/tornado_1606942300299/work\ntqdm @ file:///tmp/build/80754af9/tqdm_1615925068909/work\ntraitlets @ file:///home/ktietz/src/ci/traitlets_1611929699868/work\ntsBNgen==1.0.0\ntyped-ast @ file:///tmp/build/80754af9/typed-ast_1610484547928/work\ntyping-extensions @ 
file:///home/ktietz/src/ci_mi/typing_extensions_1612808209620/work\nujson @ file:///tmp/build/80754af9/ujson_1611259522456/work\nunicodecsv==0.14.1\nUnidecode @ file:///tmp/build/80754af9/unidecode_1614712377438/work\nurllib3 @ file:///tmp/build/80754af9/urllib3_1615837158687/work\nwatchdog @ file:///tmp/build/80754af9/watchdog_1612471027849/work\nwcwidth @ file:///tmp/build/80754af9/wcwidth_1593447189090/work\nwebencodings==0.5.1\nWerkzeug @ file:///home/ktietz/src/ci/werkzeug_1611932622770/work\nwhichcraft @ file:///tmp/build/80754af9/whichcraft_1617751293875/work\nwidgetsnbextension==3.5.1\nwrapt==1.12.1\nwurlitzer @ file:///tmp/build/80754af9/wurlitzer_1617224664226/work\nxlrd @ file:///tmp/build/80754af9/xlrd_1608072521494/work\nXlsxWriter @ file:///tmp/build/80754af9/xlsxwriter_1617224712951/work\nxlwt==1.3.0\nyapf @ file:///tmp/build/80754af9/yapf_1615749224965/work\nzict==2.0.0\nzipp @ file:///tmp/build/80754af9/zipp_1615904174917/work\nzope.event==4.5.0\nzope.interface @ file:///tmp/build/80754af9/zope.interface_1616357211867/work\n"
  }
]