Full Code of bethgelab/model-vs-human for AI

master 79ed7cd1d9ac cached
178 files
8.2 MB
2.2M tokens
410 symbols
1 requests
Download .txt
Showing preview only (8,681K chars total). Download the full file or copy to clipboard to get everything.
Repository: bethgelab/model-vs-human
Branch: master
Commit: 79ed7cd1d9ac
Files: 178
Total size: 8.2 MB

Directory structure:
gitextract_kib475aa/

├── .gitignore
├── README.md
├── examples/
│   ├── evaluate.py
│   └── plotting_definition.py
├── latex-report/
│   ├── assets/
│   │   ├── benchmark_figures.tex
│   │   ├── benchmark_table_accuracy.tex
│   │   ├── benchmark_table_humanlike.tex
│   │   ├── benchmark_tables.tex
│   │   ├── consistency_vs_accuracy.tex
│   │   ├── error_consistency_lineplots.tex
│   │   ├── error_consistency_matrices.tex
│   │   ├── noise_generalisation.tex
│   │   ├── nonparametric_accuracy.tex
│   │   └── shape_bias.tex
│   ├── neurips.sty
│   └── report.tex
├── licenses/
│   ├── CODE_LICENSE
│   ├── LICENSES_OVERVIEW.md
│   └── MODEL_LICENSES
├── modelvshuman/
│   ├── __init__.py
│   ├── cli.py
│   ├── constants.py
│   ├── datasets/
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── create_dataset.py
│   │   ├── dataloaders.py
│   │   ├── dataset_converters.py
│   │   ├── decision_mappings.py
│   │   ├── experiments.py
│   │   ├── imagenet.py
│   │   ├── info_mappings.py
│   │   ├── noise_generalisation.py
│   │   ├── registry.py
│   │   ├── sketch.py
│   │   ├── stylized.py
│   │   └── texture_shape.py
│   ├── evaluation/
│   │   ├── __init__.py
│   │   ├── evaluate.py
│   │   ├── imagenet_labels.txt
│   │   └── metrics.py
│   ├── helper/
│   │   ├── __init__.py
│   │   ├── categories.txt
│   │   ├── human_categories.py
│   │   ├── plotting_helper.py
│   │   └── wordnet_functions.py
│   ├── model_evaluator.py
│   ├── models/
│   │   ├── __init__.py
│   │   ├── pytorch/
│   │   │   ├── __init__.py
│   │   │   ├── adversarially_robust/
│   │   │   │   ├── __init__.py
│   │   │   │   └── robust_models.py
│   │   │   ├── bagnets/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── kerasnet.py
│   │   │   │   └── pytorchnet.py
│   │   │   ├── clip/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── imagenet_classes.py
│   │   │   │   └── imagenet_templates.py
│   │   │   ├── model_zoo.py
│   │   │   ├── pycontrast/
│   │   │   │   ├── __init__.py
│   │   │   │   └── pycontrast_resnet50.py
│   │   │   ├── shapenet/
│   │   │   │   ├── __init__.py
│   │   │   │   └── texture_shape_models.py
│   │   │   └── simclr/
│   │   │       ├── __init__.py
│   │   │       ├── cores/
│   │   │       │   ├── __init__.py
│   │   │       │   └── cores.py
│   │   │       ├── utils/
│   │   │       │   ├── __init__.py
│   │   │       │   ├── gdrive.py
│   │   │       │   ├── mlayer.py
│   │   │       │   └── modules.py
│   │   │       └── zoo/
│   │   │           ├── __init__.py
│   │   │           └── simclr.py
│   │   ├── registry.py
│   │   ├── tensorflow/
│   │   │   ├── __init__.py
│   │   │   ├── build_model.py
│   │   │   ├── model_zoo.py
│   │   │   └── tf_hub_model_url.py
│   │   └── wrappers/
│   │       ├── __init__.py
│   │       ├── base.py
│   │       ├── pytorch.py
│   │       └── tensorflow.py
│   ├── plotting/
│   │   ├── __init__.py
│   │   ├── analyses.py
│   │   ├── colors.py
│   │   ├── decision_makers.py
│   │   └── plot.py
│   ├── utils.py
│   └── version.py
├── raw-data/
│   ├── colour/
│   │   ├── colour_subject-01_session_1.csv
│   │   ├── colour_subject-02_session_1.csv
│   │   ├── colour_subject-03_session_1.csv
│   │   └── colour_subject-04_session_1.csv
│   ├── contrast/
│   │   ├── contrast_subject-01_session_1.csv
│   │   ├── contrast_subject-02_session_1.csv
│   │   ├── contrast_subject-03_session_1.csv
│   │   └── contrast_subject-04_session_1.csv
│   ├── cue-conflict/
│   │   ├── cue-conflict_subject-01_session_1.csv
│   │   ├── cue-conflict_subject-02_session_1.csv
│   │   ├── cue-conflict_subject-03_session_1.csv
│   │   ├── cue-conflict_subject-04_session_1.csv
│   │   ├── cue-conflict_subject-05_session_1.csv
│   │   ├── cue-conflict_subject-06_session_1.csv
│   │   ├── cue-conflict_subject-07_session_1.csv
│   │   ├── cue-conflict_subject-08_session_1.csv
│   │   ├── cue-conflict_subject-09_session_1.csv
│   │   └── cue-conflict_subject-10_session_1.csv
│   ├── edge/
│   │   ├── edge_subject-01_session_1.csv
│   │   ├── edge_subject-02_session_1.csv
│   │   ├── edge_subject-03_session_1.csv
│   │   ├── edge_subject-04_session_1.csv
│   │   ├── edge_subject-05_session_1.csv
│   │   ├── edge_subject-06_session_1.csv
│   │   ├── edge_subject-07_session_1.csv
│   │   ├── edge_subject-08_session_1.csv
│   │   ├── edge_subject-09_session_1.csv
│   │   └── edge_subject-10_session_1.csv
│   ├── eidolonI/
│   │   ├── eidolonI_subject-01_session_1.csv
│   │   ├── eidolonI_subject-02_session_1.csv
│   │   ├── eidolonI_subject-03_session_1.csv
│   │   └── eidolonI_subject-04_session_1.csv
│   ├── eidolonII/
│   │   ├── eidolonII_subject-01_session_1.csv
│   │   ├── eidolonII_subject-02_session_1.csv
│   │   ├── eidolonII_subject-03_session_1.csv
│   │   └── eidolonII_subject-04_session_1.csv
│   ├── eidolonIII/
│   │   ├── eidolonIII_subject-01_session_1.csv
│   │   ├── eidolonIII_subject-02_session_1.csv
│   │   ├── eidolonIII_subject-03_session_1.csv
│   │   └── eidolonIII_subject-04_session_1.csv
│   ├── false-colour/
│   │   ├── false-colour_subject-01_session_1.csv
│   │   ├── false-colour_subject-02_session_1.csv
│   │   ├── false-colour_subject-03_session_1.csv
│   │   └── false-colour_subject-04_session_1.csv
│   ├── high-pass/
│   │   ├── high-pass_subject-01_session_1.csv
│   │   ├── high-pass_subject-02_session_1.csv
│   │   ├── high-pass_subject-03_session_1.csv
│   │   └── high-pass_subject-04_session_1.csv
│   ├── low-pass/
│   │   ├── low-pass_subject-01_session_1.csv
│   │   ├── low-pass_subject-02_session_1.csv
│   │   ├── low-pass_subject-03_session_1.csv
│   │   └── low-pass_subject-04_session_1.csv
│   ├── phase-scrambling/
│   │   ├── phase-scrambling_subject-01_session_1.csv
│   │   ├── phase-scrambling_subject-02_session_1.csv
│   │   ├── phase-scrambling_subject-03_session_1.csv
│   │   └── phase-scrambling_subject-04_session_1.csv
│   ├── power-equalisation/
│   │   ├── power-equalisation_subject-01_session_1.csv
│   │   ├── power-equalisation_subject-02_session_1.csv
│   │   ├── power-equalisation_subject-03_session_1.csv
│   │   └── power-equalisation_subject-04_session_1.csv
│   ├── rotation/
│   │   ├── rotation_subject-01_session_1.csv
│   │   ├── rotation_subject-02_session_1.csv
│   │   ├── rotation_subject-03_session_1.csv
│   │   └── rotation_subject-04_session_1.csv
│   ├── silhouette/
│   │   ├── silhouette_subject-01_session_1.csv
│   │   ├── silhouette_subject-02_session_1.csv
│   │   ├── silhouette_subject-03_session_1.csv
│   │   ├── silhouette_subject-04_session_1.csv
│   │   ├── silhouette_subject-05_session_1.csv
│   │   ├── silhouette_subject-06_session_1.csv
│   │   ├── silhouette_subject-07_session_1.csv
│   │   ├── silhouette_subject-08_session_1.csv
│   │   ├── silhouette_subject-09_session_1.csv
│   │   └── silhouette_subject-10_session_1.csv
│   ├── sketch/
│   │   ├── sketch_subject-01_session_1.csv
│   │   ├── sketch_subject-02_session_1.csv
│   │   ├── sketch_subject-03_session_1.csv
│   │   ├── sketch_subject-04_session_1.csv
│   │   ├── sketch_subject-05_session_1.csv
│   │   ├── sketch_subject-06_session_1.csv
│   │   └── sketch_subject-07_session_1.csv
│   ├── stylized/
│   │   ├── stylized_subject-01_session_1.csv
│   │   ├── stylized_subject-02_session_1.csv
│   │   ├── stylized_subject-03_session_1.csv
│   │   ├── stylized_subject-04_session_1.csv
│   │   └── stylized_subject-05_session_1.csv
│   └── uniform-noise/
│       ├── uniform-noise_subject-01_session_1.csv
│       ├── uniform-noise_subject-02_session_1.csv
│       ├── uniform-noise_subject-03_session_1.csv
│       └── uniform-noise_subject-04_session_1.csv
├── setup.cfg
└── setup.py

================================================
FILE CONTENTS
================================================

================================================
FILE: .gitignore
================================================
datasets/*

modelvshuman/.idea/

# matlab
*pathdef.m

# tensorflow
*.h5

*.pickle
*.csv#
*.ods#
*ckpt*
*.pb
*.pbtxt
*ckpt*
*.csv#
*.pth
*.pth.tar

*.swp

## Core latex/pdflatex auxiliary files:
*.aux
*.lof
*.log
*.lot
*.fls
*.out
*.toc
*.fmt
*.fot
*.cb
*.cb2
.*.lb

## Intermediate documents:
*.dvi
*.xdv
*-converted-to.*
# these rules might exclude image files for figures etc.
# *.ps
# *.eps
# *.pdf

## Generated if empty string is given at "Please type another file name for output:"
.pdf

## Bibliography auxiliary files (bibtex/biblatex/biber):
*.bbl
*.bcf
*.blg
*-blx.aux
*-blx.bib
*.run.xml

## Build tool auxiliary files:
*.fdb_latexmk
*.synctex
*.synctex(busy)
*.synctex.gz
*.synctex.gz(busy)
*.pdfsync

## Build tool directories for auxiliary files
# latexrun
latex.out/

## Auxiliary and intermediate files from other packages:
# algorithms
*.alg
*.loa

# achemso
acs-*.bib

# amsthm
*.thm

# beamer
*.nav
*.pre
*.snm
*.vrb

# changes
*.soc

# comment
*.cut

# cprotect
*.cpt

# elsarticle (documentclass of Elsevier journals)
*.spl

# endnotes
*.ent

# fixme
*.lox

# feynmf/feynmp
*.mf
*.mp
*.t[1-9]
*.t[1-9][0-9]
*.tfm

#(r)(e)ledmac/(r)(e)ledpar
*.end
*.?end
*.[1-9]
*.[1-9][0-9]
*.[1-9][0-9][0-9]
*.[1-9]R
*.[1-9][0-9]R
*.[1-9][0-9][0-9]R
*.eledsec[1-9]
*.eledsec[1-9]R
*.eledsec[1-9][0-9]
*.eledsec[1-9][0-9]R
*.eledsec[1-9][0-9][0-9]
*.eledsec[1-9][0-9][0-9]R

# glossaries
*.acn
*.acr
*.glg
*.glo
*.gls
*.glsdefs
*.lzo
*.lzs

# uncomment this for glossaries-extra (will ignore makeindex's style files!)
# *.ist

# gnuplottex
*-gnuplottex-*

# gregoriotex
*.gaux
*.gtex

# htlatex
*.4ct
*.4tc
*.idv
*.lg
*.trc
*.xref

# hyperref
*.brf

# knitr
*-concordance.tex
# TODO Uncomment the next line if you use knitr and want to ignore its generated tikz files
# *.tikz
*-tikzDictionary

# listings
*.lol

# luatexja-ruby
*.ltjruby

# makeidx
*.idx
*.ilg
*.ind

# minitoc
*.maf
*.mlf
*.mlt
*.mtc[0-9]*
*.slf[0-9]*
*.slt[0-9]*
*.stc[0-9]*

# minted
_minted*
*.pyg

# morewrites
*.mw

# nomencl
*.nlg
*.nlo
*.nls

# pax
*.pax

# pdfpcnotes
*.pdfpc

# sagetex
*.sagetex.sage
*.sagetex.py
*.sagetex.scmd

# scrwfile
*.wrt

# sympy
*.sout
*.sympy
sympy-plots-for-*.tex/

# pdfcomment
*.upa
*.upb

# pythontex
*.pytxcode
pythontex-files-*/

# tcolorbox
*.listing

# thmtools
*.loe

# TikZ & PGF
*.dpth
*.md5
*.auxlock

# todonotes
*.tdo

# vhistory
*.hst
*.ver

# easy-todo
*.lod

# xcolor
*.xcp

# xmpincl
*.xmpi

# xindy
*.xdy

# xypic precompiled matrices and outlines
*.xyc
*.xyd

# endfloat
*.ttt
*.fff

# Latexian
TSWLatexianTemp*

## Editors:
# WinEdt
*.bak
*.sav

# Texpad
.texpadtmp

# LyX
*.lyx~

# Kile
*.backup

# gummi
.*.swp

# KBibTeX
*~[0-9]*

# TeXnicCenter
*.tps

# auto folder when using emacs and auctex
./auto/*
*.el

# expex forward references with \gathertags
*-tags.tex

# standalone packages
*.sta

# Makeindex log files
*.lpz

# xwatermark package
*.xwm

# REVTeX puts footnotes in the bibliography by default, unless the nofootinbib
# option is specified. Footnotes are then stored in a file with suffix Notes.bib.
# Uncomment the next line to have this generated file ignored.
#*Notes.bib


*.Rproj
# History files
.Rhistory
.Rapp.history

# Session Data files
.RData

# User-specific files
.Ruserdata

# Example code in package build process
*-Ex.R

# Output files from R CMD build
/*.tar.gz

# Output files from R CMD check
/*.Rcheck/

# RStudio files
.Rproj.user/

# produced vignettes
vignettes/*.html
vignettes/*.pdf

# OAuth2 token, see https://github.com/hadley/httr/releases/tag/v0.3
.httr-oauth

# knitr and R markdown default cache directories
*_cache/
/cache/

# Temporary files created by R markdown
*.utf8.md
*.knit.md

# R Environment Variables
.Renviron

# pkgdown site
docs/


# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
#  Usually these files are written by a python script from a template
#  before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
#   For a library or package, you might want to ignore these files since the code is
#   intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
#   However, in case of collaboration, if having platform-specific dependencies or dependencies
#   having no cross-platform support, pipenv may install dependencies that don't work, or not
#   install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

.Rproj.user


================================================
FILE: README.md
================================================
![header](./assets/header/header.png "header")

<p align="center">
  <a href="#trophy-benchmark">Benchmark</a> •
  <a href="#wrench-installation">Installation</a> •
  <a href="#microscope-user-experience">User experience</a> •
  <a href="#camel-model-zoo">Model zoo</a> •
  <a href="#file_folder-datasets">Datasets</a> •
  <a href="#credit_card-credit">Credit & citation</a>
</p>

# modelvshuman: Does your model generalise better than humans?

``modelvshuman`` is a Python toolbox to benchmark the gap between human and machine vision. Using this library, both PyTorch and TensorFlow models can be evaluated on 17 out-of-distribution datasets with high-quality human comparison data.

## :trophy: Benchmark

The top-10 models are listed here; training dataset size is indicated in brackets. Additionally, standard ResNet-50 is included as the last entry of the table for comparison. Model ranks are calculated across the full range of 52 models that we tested. If your model scores better than some (or even all) of the models here, please open a pull request and we'll be happy to include it here!

### Most human-like behaviour
winner            | model                           | accuracy difference &#8595;  | observed consistency &#8593; | error consistency &#8593;     | mean rank &#8595;      |
:----------------:|  ------------------------------ |-----------------------------:|-----------------------------:|------------------------------:|-----------------------:|
:1st_place_medal: |  [ViT-22B-384](https://arxiv.org/abs/2302.05442): ViT-22B (4B)      |                     **.018** |                     **.783** |                          .258 |                 **1.67**|
:2nd_place_medal: |  [CLIP](https://arxiv.org/abs/2103.00020): ViT-B (400M)             |                         .023 |                         .758 |                      **.281** |                        3|
:3rd_place_medal: |  [ViT-22B-560](https://arxiv.org/abs/2302.05442): ViT-22B (4B)      |                         .022 |                         .739 |                      **.281** |                     3.33|
:clap: |  [SWSL](https://arxiv.org/abs/1905.00546): ResNeXt-101 (940M)                  |                         .028 |                         .752 |                          .237 |                        6|
:clap: |  [BiT-M](https://arxiv.org/abs/1912.11370): ResNet-101x1 (14M)                 |                         .034 |                         .733 |                          .252 |                        7|
:clap:            |  [BiT-M](https://arxiv.org/abs/1912.11370): ResNet-152x2 (14M)      |                         .035 |                         .737 |                          .243 |                     7.67|
:clap:            |  [ViT-L](https://openreview.net/forum?id=YicbFdNTTy) (1M)           |                         .033 |                         .738 |                          .222 |                     9.33|
:clap:            |  [BiT-M](https://arxiv.org/abs/1912.11370): ResNet-152x4 (14M)      |                         .035 |                         .732 |                          .233 |                    10.33|
:clap:            |  [BiT-M](https://arxiv.org/abs/1912.11370): ResNet-50x3 (14M)       |                         .040 |                         .726 |                          .228 |                       12|
:clap:            |  [ViT-L](https://openreview.net/forum?id=YicbFdNTTy) (14M)          |                         .035 |                         .744 |                          .206 |                       12|
...               |  standard [ResNet-50](https://arxiv.org/abs/1502.01852) (1M)        |                         .087 |                         .665 |                          .208 |                    31.33|

### Highest OOD (out-of-distribution) distortion robustness

winner            |  model                                                                       |   OOD accuracy &#8593;    |   rank &#8595;    |
:----------------:|  ----------------------------------------------------------------------------| -------------------------:|------------------:|
:1st_place_medal: |  [ViT-22B-224](https://arxiv.org/abs/2302.05442): ViT-22B (4B)               |                  **.837** |              **1**|
:2nd_place_medal: |  [Noisy Student](https://arxiv.org/abs/1911.04252): EfficientNet-L2 (300M)   |                      .829 |                  2|
:3rd_place_medal: |  [ViT-22B-384](https://arxiv.org/abs/2302.05442): ViT-22B (4B)               |                      .798 |                  3|
:clap:            |  [ViT-L](https://openreview.net/forum?id=YicbFdNTTy) (14M)                   |                      .733 |                  4|
:clap:            |  [CLIP](https://arxiv.org/abs/2103.00020): ViT-B (400M)                      |                      .708 |                  5|
:clap:            |  [ViT-L](https://openreview.net/forum?id=YicbFdNTTy) (1M)                    |                      .706 |                  6|
:clap:            |  [SWSL](https://arxiv.org/abs/1905.00546): ResNeXt-101 (940M)                |                      .698 |                  7|
:clap:            |  [BiT-M](https://arxiv.org/abs/1912.11370): ResNet-152x2 (14M)               |                      .694 |                  8|
:clap:            |  [BiT-M](https://arxiv.org/abs/1912.11370): ResNet-152x4 (14M)               |                      .688 |                  9|
:clap:            |  [BiT-M](https://arxiv.org/abs/1912.11370): ResNet-101x3 (14M)               |                      .682 |                 10|
...               |  standard [ResNet-50](https://arxiv.org/abs/1502.01852) (1M)                 |                      .559 |                 34|

## :wrench: Installation

Simply clone the repository to a location of your choice and follow these steps (requires ``python3.8``):


1. Set the repository home path by running the following from the command line:

    ```
    export MODELVSHUMANDIR=/absolute/path/to/this/repository/
    ```

2. Within the cloned repository, install package:

    ```
    pip install -e .
    ```
    
    (The -e option makes sure that changes to the code are reflected in the package, which is important e.g. if you add your own model or make any other changes)

## :microscope: User experience

Simply edit ``examples/evaluate.py`` as desired. This will test a list of models on out-of-distribution datasets, generating plots. If you then compile ``latex-report/report.tex``, all the plots will be included in one convenient PDF report.



## :camel: Model zoo

The following models are currently implemented:

- [x] 20+ standard supervised models from the [torchvision model zoo](https://pytorch.org/docs/1.4.0/torchvision/models.html)
- [x] 5 self-supervised contrastive models (InsDis, MoCo, MoCoV2, InfoMin, PIRL) from the [pycontrast repo](https://github.com/HobbitLong/PyContrast/)
- [x] 3 self-supervised contrastive SimCLR model variants (simclr_resnet50x1, simclr_resnet50x2, simclr_resnet50x4) from the [ptrnet repo](https://github.com/sacadena/ptrnets)
- [x] 3 vision transformer variants (vit_small_patch16_224, vit_base_patch16_224 and vit_large_patch16_224) from the [pytorch-image-models repo](https://github.com/rwightman/pytorch-image-models)
- [x] 10 adversarially "robust" models from [robust-models-transfer](https://arxiv.org/abs/2007.08489) implemented via the [ptrnet repo](https://github.com/sacadena/ptrnets)
- [x] 3 "ShapeNet" ResNet-50 models with different degree of stylized training from the [texture-vs-shape repo](https://github.com/rgeirhos/texture-vs-shape)
- [x] 3 BagNets models from the [BagNets repo](https://github.com/wielandbrendel/bag-of-local-features-models#bagnets)
- [x] 1 semi-supervised ResNet-50 model pre-trained on 940M images from the [semi-supervised-ImageNet1K-models repo](https://github.com/facebookresearch/semi-supervised-ImageNet1K-models)
- [x] 6 Big Transfer models from the [pytorch-image-models repo](https://github.com/rwightman/pytorch-image-models)

If you e.g. add/implement your own model, please make sure to compute the ImageNet accuracy as a sanity check.


##### How to load a model
If you just want to load a model from the model zoo, this is what you can do:

```python
    # loading a PyTorch model from the zoo
    from modelvshuman.models.pytorch.model_zoo import InfoMin
    model = InfoMin("InfoMin")

    # loading a Tensorflow model from the zoo
    from modelvshuman.models.tensorflow.model_zoo import efficientnet_b0
    model = efficientnet_b0("efficientnet_b0")
```

Then, if you have a custom set of images that you want to evaluate the model on, load those (in the example below, called ``images``) and evaluate via:

```python
    output_numpy = model.forward_batch(images)
    
    # by default, type(output) is numpy.ndarray, which can be converted to a tensor via:
    output_tensor = torch.tensor(output_numpy)
```

However, if you simply want to run a model through the generalisation datasets provided by the toolbox, we recommend to check the section on User experience.

##### How to list all available models

All implemented models are registered by the model registry, which can then be used to list all available models of a certain framework with the following method:

```python
    from modelvshuman import models
    
    print(models.list_models("pytorch"))
    print(models.list_models("tensorflow"))
```

##### How to add a new model
Adding a new model is possible for standard PyTorch and TensorFlow models. Depending on the framework (pytorch / tensorflow), open ``modelvshuman/models/<framework>/model_zoo.py``. Here, you can add your own model with a few lines of code - similar to how you would load it usually. If your model has a custom model definition, create a new subdirectory called ``modelvshuman/models/<framework>/my_fancy_model/fancy_model.py`` which you can then import from ``model_zoo.py`` via ``from .my_fancy_model import fancy_model``.


## :file_folder: Datasets
In total, 17 datasets with human comparison data collected under highly controlled laboratory conditions in the [Wichmannlab](http://www.wichmannlab.org) are available.

Twelve datasets correspond to parametric or binary image distortions. Top row: colour/grayscale, contrast, high-pass, low-pass (blurring), phase noise, power equalisation. Bottom row: opponent colour, rotation, Eidolon I, II and III, uniform noise.
![noise-stimuli](./assets/stimuli_visualizations/noise-stimuli-figure/all_noise-generalisation_stimuli.png  "noise-stimuli")

The remaining five datasets correspond to the following nonparametric image manipulations: sketch, stylized, edge, silhouette, texture-shape cue conflict.
![nonparametric-stimuli](./assets/stimuli_visualizations/nonparametric-stimuli-figure/all_nonparametric_stimuli.png  "nonparametric-stimuli")

##### How to load a dataset
Similarly, if you're interested in just loading a dataset, you can do this via:
```python
   from modelvshuman.datasets import sketch      
   dataset = sketch(batch_size=16, num_workers=4)
```
Note that the datasets aren't available after installing the toolbox just yet. Instead, they are automatically downloaded the first time a model is evaluated on the dataset (see ``examples/evaluate.py``).

##### How to list all available datasets
```python
    from modelvshuman import datasets
    
    print(list(datasets.list_datasets().keys()))
```

##### Download raw test images
If you'd like to download the test images yourself, they are available [here](https://github.com/bethgelab/model-vs-human/releases/tag/v0.1).

## :credit_card: Credit

Psychophysical data were collected by us in the vision laboratory of the [Wichmannlab](http://www.wichmannlab.org).

That said, we used existing image dataset sources. 12 datasets were obtained from [Generalisation in humans and deep neural networks](http://papers.nips.cc/paper/7982-generalisation-in-humans-and-deep-neural-networks.pdf). 4 datasets were obtained from [ImageNet-trained CNNs are biased towards texture; increasing shape bias improves accuracy and robustness](https://openreview.net/forum?id=Bygh9j09KX). Additionally, we used 1 dataset from [Learning Robust Global Representations by Penalizing Local Predictive Power](https://arxiv.org/abs/1905.13549) (sketch images from ImageNet-Sketch).

We thank all model authors and repository maintainers for providing the models described above.

### Citation

    @inproceedings{geirhos2021partial,
      title={Partial success in closing the gap between human and machine vision},
      author={Geirhos, Robert and Narayanappa, Kantharaju and Mitzkus, Benjamin and Thieringer, Tizian and Bethge, Matthias and Wichmann, Felix A and Brendel, Wieland},
      booktitle={{Advances in Neural Information Processing Systems 34}},
      year={2021},
    }


================================================
FILE: examples/evaluate.py
================================================
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)

from modelvshuman import Plot, Evaluate
from modelvshuman import constants as c
from plotting_definition import plotting_definition_template


def run_evaluation():
    """Evaluate a small set of example models on the default OOD datasets.

    Results (CSV files with per-image predictions) are written by the
    toolbox; edit the lists below to test other models or datasets.
    """
    model_names = ["resnet50", "bagnet33", "simclr_resnet50x1"]
    # c.DEFAULT_DATASETS covers all benchmark datasets; a subset such as
    # ["cue-conflict", "uniform-noise"] works as well.
    dataset_names = c.DEFAULT_DATASETS
    evaluation_params = {
        "batch_size": 64,
        "print_predictions": True,
        "num_workers": 20,
    }
    Evaluate()(model_names, dataset_names, **evaluation_params)


def run_plotting():
    """Generate the default benchmark figures from previously saved results.

    Models must have been evaluated (see run_evaluation) before their data
    can be plotted. In examples/plotting_definition.py, you can edit
    plotting_definition_template as desired: this will let the toolbox know
    which models to plot, and which colours to use etc.
    """
    # c.DEFAULT_PLOT_TYPES covers all figures; a subset such as
    # ["accuracy", "shape-bias"] works as well.
    Plot(plot_types=c.DEFAULT_PLOT_TYPES,
         plotting_definition=plotting_definition_template,
         figure_directory_name="example-figures/")


if __name__ == "__main__":
    run_evaluation()  # step 1: evaluate models on out-of-distribution datasets
    run_plotting()    # step 2: plot the evaluation results


================================================
FILE: examples/plotting_definition.py
================================================
#!/usr/bin/env python3

"""
Define decision makers (either human participants or CNN models).
"""

from modelvshuman import constants as c
from modelvshuman.plotting.colors import *
from modelvshuman.plotting.decision_makers import DecisionMaker


def plotting_definition_template(df):
    """Decision makers to compare a few models with human observers.

    This exemplary definition can be adapted for the
    desired purpose, e.g. by adding more/different models.

    Note that models will need to be evaluated first, before
    their data can be plotted.

    For each model, define:
    - a color using rgb(42, 42, 42)
    - a plotting symbol by setting marker;
      a list of markers can be found here:
      https://matplotlib.org/3.1.0/api/markers_api.html
    """

    # (name pattern, colour, marker symbol, display name) per decision maker;
    # "subject-*" matches every human observer's data.
    specs = [
        ("resnet50", rgb(65, 90, 140), "o", "ResNet-50"),
        ("bagnet33", rgb(110, 110, 110), "o", "BagNet-33"),
        ("simclr_resnet50x1", rgb(210, 150, 0), "o", "SimCLR-x1"),
        ("subject-*", rgb(165, 30, 55), "D", "humans"),
    ]
    return [DecisionMaker(name_pattern=pattern, color=color,
                          marker=marker, df=df, plotting_name=label)
            for pattern, color, marker, label in specs]


def get_comparison_decision_makers(df, include_humans=True,
                                   humans_last=True):
    """Decision makers used in our paper.

    Builds the complete, ordered list of DecisionMaker entries for the
    paper's model comparison: supervised torchvision models,
    self-supervised models, adversarially robust ResNets, vision
    transformers, semi-weakly supervised (SWSL) models, BiT-M models,
    Noisy Student, CLIP and (optionally) human observers.

    Args:
        df: results dataframe passed through to every DecisionMaker.
        include_humans: if True, add the human observers entry.
        humans_last: if True, CLIP and humans are appended at the end of
            the list; otherwise they are inserted before the SWSL models.

    Returns:
        List of DecisionMaker objects in plotting order.
    """

    def make(pattern, color, label, marker=None):
        # Forward ``marker`` only when explicitly given, so entries
        # without one keep the DecisionMaker default.
        kwargs = {"name_pattern": pattern, "color": color, "df": df,
                  "plotting_name": label}
        if marker is not None:
            kwargs["marker"] = marker
        return DecisionMaker(**kwargs)

    d = []

    # 1. supervised models (no marker specified)
    d.extend(make(model, rgb(230, 230, 230), model)
             for model in c.TORCHVISION_MODELS)

    # 2. self-supervised models
    d.extend(make(model, orange2, model + ": ResNet-50", marker="o")
             for model in c.PYCONTRAST_MODELS)
    for width in (1, 2, 4):
        d.append(make("simclr_resnet50x%d" % width, orange2,
                      "SimCLR: ResNet-50x%d" % width, marker="o"))

    # 3. adversarially robust models (darker blue = larger eps)
    robust = [("resnet50_l2_eps0", rgb(196, 205, 229), "0.0"),
              ("resnet50_l2_eps0_5", rgb(176, 190, 220), "0.5"),
              ("resnet50_l2_eps1", rgb(134, 159, 203), "1.0"),
              ("resnet50_l2_eps3", rgb(86, 130, 186), "3.0"),
              ("resnet50_l2_eps5", blue2, "5.0")]
    d.extend(make(pattern, color, "ResNet-50 L2 eps " + eps, marker="o")
             for pattern, color, eps in robust)

    # 4. vision transformers without large-scale pretraining
    vit_color = rgb(144, 159, 110)
    for size in ("small", "base", "large"):
        d.append(make("vit_%s_patch16_224" % size, vit_color,
                      "ViT-%s" % size[0].upper(), marker="v"))

    if not humans_last:
        if include_humans:
            d.append(make("subject-*", red, "humans", marker="D"))
        d.append(make("clip", brown1, "CLIP: ViT-B (400M)", marker="v"))

    # semi-weakly supervised models
    d.append(make("ResNeXt101_32x16d_swsl", purple1,
                  "SWSL: ResNeXt-101 (940M)", marker="o"))
    d.append(make("resnet50_swsl", purple1,
                  "SWSL: ResNet-50 (940M)", marker="o"))

    # BiT-M models (largest first)
    bitm_col = rgb(153, 142, 195)
    for variant in ("152x4", "152x2", "101x3", "101x1", "50x3", "50x1"):
        d.append(make("BiTM_resnetv2_" + variant, bitm_col,
                      "BiT-M: ResNet-%s (14M)" % variant, marker="o"))

    # vision transformers pretrained on ImageNet-21k
    d.append(make("transformer_L16_IN21K", green1, "ViT-L (14M)", marker="v"))
    d.append(make("transformer_B16_IN21K", green1, "ViT-B (14M)", marker="v"))

    d.append(make("efficientnet_l2_noisy_student_475", metallic,
                  "Noisy Student: ENetL2 (300M)", marker="o"))

    if humans_last:
        d.append(make("clip", brown1, "CLIP: ViT-B (400M)", marker="v"))
        if include_humans:
            d.append(make("subject-*", red, "humans", marker="D"))

    return d


================================================
FILE: latex-report/assets/benchmark_figures.tex
================================================
\begin{figure}[h]
	\begin{subfigure}{0.49\linewidth}
		\centering
		\includegraphics[width=\linewidth]{benchmark_16-class-accuracy.pdf}
		%\vspace{\captionspaceBenchmark}
		\caption{OOD accuracy (higher = better).}
		\label{subfig:benchmark_a}
		\vspace{\captionspaceII}
	\end{subfigure}\hfill
	\begin{subfigure}{0.49\linewidth}
		\centering
		\includegraphics[width=\linewidth]{benchmark_16-class-accuracy-difference.pdf}
		%\vspace{\captionspaceBenchmark}
		\caption{Accuracy difference (lower = better).}
		\label{subfig:benchmark_b}
		\vspace{\captionspaceII}
	\end{subfigure}\hfill
	\begin{subfigure}{0.49\linewidth}
		\centering
		\includegraphics[width=\linewidth]{benchmark_observed-consistency.pdf}
		%\vspace{\captionspaceBenchmark}
		\caption{Observed consistency (higher = better).}			\label{subfig:benchmark_c}
		\vspace{\captionspaceII}
	\end{subfigure}\hfill
	\begin{subfigure}{0.49\linewidth}
		\centering
		\includegraphics[width=\linewidth]{benchmark_error-consistency.pdf}
		%\vspace{\captionspaceBenchmark}
		\caption{Error consistency (higher = better).}			\label{subfig:benchmark_d}
		\vspace{\captionspaceII}
	\end{subfigure}\hfill
	\caption{Benchmark results for different models, aggregated over datasets.}
	\label{fig:benchmark_barplots}
\end{figure}


================================================
FILE: latex-report/assets/benchmark_table_accuracy.tex
================================================
\begin{tabular}{lrr}
\toprule
    model & OOD accuracy $\uparrow$ & rank $\downarrow$ \\
\midrule
SimCLR-x1 &          \textbf{0.596} &    \textbf{1.000} \\
ResNet-50 &                   0.559 &             2.000 \\
BagNet-33 &                   0.398 &             3.000 \\
\bottomrule
\end{tabular}



================================================
FILE: latex-report/assets/benchmark_table_humanlike.tex
================================================
\begin{tabular}{lrrrr}
\toprule
    model & accuracy diff. $\downarrow$ & obs. consistency $\uparrow$ & error consistency $\uparrow$ & mean rank $\downarrow$ \\
\midrule
SimCLR-x1 &              \textbf{0.080} &              \textbf{0.667} &                        0.179 &         \textbf{1.333} \\
ResNet-50 &                       0.087 &                       0.665 &               \textbf{0.208} &                  1.667 \\
BagNet-33 &                       0.156 &                       0.567 &                        0.137 &                  3.000 \\
\bottomrule
\end{tabular}



================================================
FILE: latex-report/assets/benchmark_tables.tex
================================================
\begin{table}[ht]
	\caption{Benchmark table of model results for most human-like behaviour. The three metrics ``accuracy difference'', ``observed consistency'' and ``error consistency'' (plotted in Figure~\ref{fig:benchmark_barplots}) each produce a different model ranking. The mean rank of a model across those three metrics is used to rank the models on our benchmark.}
	\label{tab:benchmark_table_humanlike}
	\centering
	\input{assets/benchmark_table_humanlike.tex}
\end{table}

\begin{table}[h!]
	\caption{Benchmark table of model results for highest out-of-distribution robustness.}
	\label{tab:benchmark_table_accurate}
	\centering
	\input{assets/benchmark_table_accuracy.tex}
\end{table}




================================================
FILE: latex-report/assets/consistency_vs_accuracy.tex
================================================
\begin{figure}[h]
    \centering
    \begin{subfigure}{0.45\textwidth}
        \centering
        \includegraphics[width=\linewidth]{scatter-plot_OOD-accuracy_vs_observed-consistency_multiple-datasets.pdf}
        %\vspace{-0.1cm}
        \caption{Out-of-distribution accuracy vs.\\observed consistency}
        \label{subfig:error_consistency_12_datasets}
    \end{subfigure}
    \begin{subfigure}{0.45\textwidth}
        \centering
        \includegraphics[width=\linewidth]{scatter-plot_OOD-accuracy_vs_error-consistency_multiple-datasets.pdf}
        %\vspace{-0.1cm}
        \caption{Out-of-distribution accuracy vs.\\error consistency}
        \label{subfig:error_consistency_5_datasets}
    \end{subfigure}
    \vspace{-0.1cm}
    \caption{Observed consistency and error consistency between models and humans as a function of out-of-distribution (OOD) accuracy. Dotted lines indicate consistency expected by chance.}
    \label{fig:error_consistency_12_and_5_datasets}
\end{figure}

================================================
FILE: latex-report/assets/error_consistency_lineplots.tex
================================================
\begin{figure}
	%	% colour vs. greyscale | true vs. false colour
	\begin{subfigure}{\figwidth}
			\centering
			\textbf{Accuracy}\\
			\includegraphics[width=\linewidth]{colour_OOD-accuracy.pdf}
			\vspace{\captionspace}
			\caption{Colour vs. greyscale}
			\vspace{\captionspaceII}
		\end{subfigure}\hfill
		\begin{subfigure}{\figwidth}
			\centering
			\textbf{Error consistency}\\
			\includegraphics[width=\linewidth]{colour_error-consistency.pdf}
			\vspace{\captionspace}
			\caption*{}
			\vspace{\captionspaceII}
		\end{subfigure}\hfill
		\begin{subfigure}{\figwidth}
			\centering
			\textbf{Accuracy}\\
			\includegraphics[width=\linewidth]{false-colour_OOD-accuracy.pdf}
			\vspace{\captionspace}
			\caption{True vs. false colour}
			\vspace{\captionspaceII}
		\end{subfigure}\hfill
		\begin{subfigure}{\figwidth}
			\centering
			\textbf{Error consistency}\\
			\includegraphics[width=\linewidth]{false-colour_error-consistency.pdf}
			\vspace{\captionspace}
			\caption*{}
			\vspace{\captionspaceII}
		\end{subfigure}\hfill
	
	% uniform noise | low pass
		\begin{subfigure}{\figwidth}
			\centering
			\includegraphics[width=\linewidth]{uniform-noise_OOD-accuracy.pdf}
			\vspace{\captionspace}
			\caption{Uniform noise}
			\vspace{\captionspaceII}
		\end{subfigure}\hfill
		\begin{subfigure}{\figwidth}
			\centering
			\includegraphics[width=\linewidth]{uniform-noise_error-consistency.pdf}
			\vspace{\captionspace}
			\caption*{}
			\vspace{\captionspaceII}
		\end{subfigure}\hfill
		\begin{subfigure}{\figwidth}
			\centering
			\includegraphics[width=\linewidth]{low-pass_OOD-accuracy.pdf}
			\vspace{\captionspace}
			\caption{Low-pass}
			\vspace{\captionspaceII}
		\end{subfigure}\hfill
		\begin{subfigure}{\figwidth}
			\centering
			\includegraphics[width=\linewidth]{low-pass_error-consistency.pdf}
			\vspace{\captionspace}
			\caption*{}
			\vspace{\captionspaceII}
		\end{subfigure}\hfill
		
	%	% contrast | high-pass
		\begin{subfigure}{\figwidth}
			\centering
			\includegraphics[width=\linewidth]{contrast_OOD-accuracy.pdf}
			\vspace{\captionspace}
			\caption{Contrast}
			\vspace{\captionspaceII}
		\end{subfigure}\hfill
		\begin{subfigure}{\figwidth}
			\centering
			\includegraphics[width=\linewidth]{contrast_error-consistency.pdf}
			\vspace{\captionspace}
			\caption*{}
			\vspace{\captionspaceII}
		\end{subfigure}\hfill
		\begin{subfigure}{\figwidth}
			\centering
			\includegraphics[width=\linewidth]{high-pass_OOD-accuracy.pdf}
			\vspace{\captionspace}
			\caption{High-pass}
			\vspace{\captionspaceII}
		\end{subfigure}\hfill
		\begin{subfigure}{\figwidth}
			\centering
			\includegraphics[width=\linewidth]{high-pass_error-consistency.pdf}
			\vspace{\captionspace}
			\caption*{}
			\vspace{\captionspaceII}
		\end{subfigure}\hfill
	
	    % eidolon I | phase noise
		\begin{subfigure}{\figwidth}
			\centering
			\includegraphics[width=\linewidth]{eidolonI_OOD-accuracy.pdf}
			\vspace{\captionspace}
			\caption{Eidolon I}
			\vspace{\captionspaceII}
		\end{subfigure}\hfill
		\begin{subfigure}{\figwidth}
			\centering        \includegraphics[width=\linewidth]{eidolonI_error-consistency.pdf}
			\vspace{\captionspace}
			\caption*{}
			\vspace{\captionspaceII}
		\end{subfigure}\hfill
		\begin{subfigure}{\figwidth}
			\centering
			\includegraphics[width=\linewidth]{phase-scrambling_OOD-accuracy.pdf}
			\vspace{\captionspace}
			\caption{Phase noise}
			\vspace{\captionspaceII}
		\end{subfigure}\hfill
		\begin{subfigure}{\figwidth}
			\centering
			\includegraphics[width=\linewidth]{phase-scrambling_error-consistency.pdf}
			\vspace{\captionspace}
			\caption*{}
			\vspace{\captionspaceII}
		\end{subfigure}\hfill
		
	%	eidolon II | power equalisation
	\begin{subfigure}{\figwidth}
		\centering
		\includegraphics[width=\linewidth]{eidolonII_OOD-accuracy.pdf}
		\vspace{\captionspace}
		\caption{Eidolon II}
		\vspace{\captionspaceII}
	\end{subfigure}\hfill
	\begin{subfigure}{\figwidth}
		\centering
		\includegraphics[width=\linewidth]{eidolonII_error-consistency.pdf}
		\vspace{\captionspace}
		\caption*{}
		\vspace{\captionspaceII}
	\end{subfigure}\hfill
	\begin{subfigure}{\figwidth}
		\centering
		\includegraphics[width=\linewidth]{power-equalisation_OOD-accuracy.pdf}
		\vspace{\captionspace}
		\caption{Power equalisation}
		\vspace{\captionspaceII}
	\end{subfigure}\hfill
	\begin{subfigure}{\figwidth}
		\centering
		\includegraphics[width=\linewidth]{power-equalisation_error-consistency.pdf}
		\vspace{\captionspace}
		\caption*{}
		\vspace{\captionspaceII}
	\end{subfigure}\hfill
	
	% eidolon III | rotation
	\begin{subfigure}{\figwidth}
		\centering
		\includegraphics[width=\linewidth]{eidolonIII_OOD-accuracy.pdf}
		\vspace{\captionspace}
		\caption{Eidolon III}
	\end{subfigure}\hfill
	\begin{subfigure}{\figwidth}
		\centering        \includegraphics[width=\linewidth]{eidolonIII_error-consistency.pdf}
		\vspace{\captionspace}
		\caption*{}
	\end{subfigure}\hfill
	\begin{subfigure}{\figwidth}
		\centering
		\includegraphics[width=\linewidth]{rotation_OOD-accuracy.pdf}
		\vspace{\captionspace}
		\caption{Rotation}
	\end{subfigure}\hfill
	\begin{subfigure}{\figwidth}
		\centering
		\includegraphics[width=\linewidth]{rotation_error-consistency.pdf}
		\vspace{\captionspace}
		\caption*{}
	\end{subfigure}\hfill
	\caption{OOD accuracy and error consistency.}
	\label{fig:results_accuracy_error_consistency}
\end{figure}

================================================
FILE: latex-report/assets/error_consistency_matrices.tex
================================================
\begin{figure}
	\centering
	\includegraphics[width=0.8\linewidth]{sketch_error-consistency_matrix.pdf}
	\caption{Error consistency for `sketch' images.}
	\label{fig:error_consistency_matrix_sketch}
\end{figure}

\begin{figure}
    \centering
    \includegraphics[width=0.8\linewidth]{stylized_error-consistency_matrix.pdf}
    \caption{Error consistency for `stylized' images.}
    \label{fig:error_consistency_matrix_stylized}
\end{figure}

\begin{figure}
	\centering
	\includegraphics[width=0.8\linewidth]{edge_error-consistency_matrix.pdf}
	\caption{Error consistency for `edge' images.}
	\label{fig:error_consistency_matrix_edges}
\end{figure}

\begin{figure}
	\centering
	\includegraphics[width=0.8\linewidth]{silhouette_error-consistency_matrix.pdf}
	\caption{Error consistency for `silhouette' images.}
	\label{fig:error_consistency_matrix_silhouettes}
\end{figure}

\begin{figure}
	\centering
	\includegraphics[width=0.8\linewidth]{cue-conflict_error-consistency_matrix.pdf}
	\caption{Error consistency for `cue conflict' images.}
	\label{fig:error_consistency_matrix_cue-conflict}
\end{figure}



================================================
FILE: latex-report/assets/noise_generalisation.tex
================================================
\begin{figure}
	%	% colour vs. greyscale | true vs. false colour
	\begin{subfigure}{\figwidth}
			\centering
			\textbf{Accuracy}\\
			\includegraphics[width=\linewidth]{colour_OOD-accuracy.pdf}
			\vspace{\captionspace}
			\caption{Colour vs. greyscale}
			\vspace{\captionspaceII}
		\end{subfigure}\hfill
		\begin{subfigure}{\figwidth}
			\centering
			\textbf{Entropy}\\
			\includegraphics[width=\linewidth]{colour_entropy.pdf}
			\vspace{\captionspace}
			\caption*{}
			\vspace{\captionspaceII}
		\end{subfigure}\hfill
		\begin{subfigure}{\figwidth}
			\centering
			\textbf{Accuracy}\\
			\includegraphics[width=\linewidth]{false-colour_OOD-accuracy.pdf}
			\vspace{\captionspace}
			\caption{True vs. false colour}
			\vspace{\captionspaceII}
		\end{subfigure}\hfill
		\begin{subfigure}{\figwidth}
			\centering
			\textbf{Entropy}\\
			\includegraphics[width=\linewidth]{false-colour_entropy.pdf}
			\vspace{\captionspace}
			\caption*{}
			\vspace{\captionspaceII}
		\end{subfigure}\hfill
	
	% uniform noise | low pass
		\begin{subfigure}{\figwidth}
			\centering
			\includegraphics[width=\linewidth]{uniform-noise_OOD-accuracy.pdf}
			\vspace{\captionspace}
			\caption{Uniform noise}
			\vspace{\captionspaceII}
		\end{subfigure}\hfill
		\begin{subfigure}{\figwidth}
			\centering
			\includegraphics[width=\linewidth]{uniform-noise_entropy.pdf}
			\vspace{\captionspace}
			\caption*{}
			\vspace{\captionspaceII}
		\end{subfigure}\hfill
		\begin{subfigure}{\figwidth}
			\centering
			\includegraphics[width=\linewidth]{low-pass_OOD-accuracy.pdf}
			\vspace{\captionspace}
			\caption{Low-pass}
			\vspace{\captionspaceII}
		\end{subfigure}\hfill
		\begin{subfigure}{\figwidth}
			\centering
			\includegraphics[width=\linewidth]{low-pass_entropy.pdf}
			\vspace{\captionspace}
			\caption*{}
			\vspace{\captionspaceII}
		\end{subfigure}\hfill
		
	%	% contrast | high-pass
		\begin{subfigure}{\figwidth}
			\centering
			\includegraphics[width=\linewidth]{contrast_OOD-accuracy.pdf}
			\vspace{\captionspace}
			\caption{Contrast}
			\vspace{\captionspaceII}
		\end{subfigure}\hfill
		\begin{subfigure}{\figwidth}
			\centering
			\includegraphics[width=\linewidth]{contrast_entropy.pdf}
			\vspace{\captionspace}
			\caption*{}
			\vspace{\captionspaceII}
		\end{subfigure}\hfill
		\begin{subfigure}{\figwidth}
			\centering
			\includegraphics[width=\linewidth]{high-pass_OOD-accuracy.pdf}
			\vspace{\captionspace}
			\caption{High-pass}
			\vspace{\captionspaceII}
		\end{subfigure}\hfill
		\begin{subfigure}{\figwidth}
			\centering
			\includegraphics[width=\linewidth]{high-pass_entropy.pdf}
			\vspace{\captionspace}
			\caption*{}
			\vspace{\captionspaceII}
		\end{subfigure}\hfill
	
	    % eidolon I | phase noise
		\begin{subfigure}{\figwidth}
			\centering
			\includegraphics[width=\linewidth]{eidolonI_OOD-accuracy.pdf}
			\vspace{\captionspace}
			\caption{Eidolon I}
			\vspace{\captionspaceII}
		\end{subfigure}\hfill
		\begin{subfigure}{\figwidth}
			\centering        \includegraphics[width=\linewidth]{eidolonI_entropy.pdf}
			\vspace{\captionspace}
			\caption*{}
			\vspace{\captionspaceII}
		\end{subfigure}\hfill
		\begin{subfigure}{\figwidth}
			\centering
			\includegraphics[width=\linewidth]{phase-scrambling_OOD-accuracy.pdf}
			\vspace{\captionspace}
			\caption{Phase noise}
			\vspace{\captionspaceII}
		\end{subfigure}\hfill
		\begin{subfigure}{\figwidth}
			\centering
			\includegraphics[width=\linewidth]{phase-scrambling_entropy.pdf}
			\vspace{\captionspace}
			\caption*{}
			\vspace{\captionspaceII}
		\end{subfigure}\hfill
		
	%	eidolon II | power equalisation
	\begin{subfigure}{\figwidth}
		\centering
		\includegraphics[width=\linewidth]{eidolonII_OOD-accuracy.pdf}
		\vspace{\captionspace}
		\caption{Eidolon II}
		\vspace{\captionspaceII}
	\end{subfigure}\hfill
	\begin{subfigure}{\figwidth}
		\centering
		\includegraphics[width=\linewidth]{eidolonII_entropy.pdf}
		\vspace{\captionspace}
		\caption*{}
		\vspace{\captionspaceII}
	\end{subfigure}\hfill
	\begin{subfigure}{\figwidth}
		\centering
		\includegraphics[width=\linewidth]{power-equalisation_OOD-accuracy.pdf}
		\vspace{\captionspace}
		\caption{Power equalisation}
		\vspace{\captionspaceII}
	\end{subfigure}\hfill
	\begin{subfigure}{\figwidth}
		\centering
		\includegraphics[width=\linewidth]{power-equalisation_entropy.pdf}
		\vspace{\captionspace}
		\caption*{}
		\vspace{\captionspaceII}
	\end{subfigure}\hfill
	
	% eidolon III | rotation
	\begin{subfigure}{\figwidth}
		\centering
		\includegraphics[width=\linewidth]{eidolonIII_OOD-accuracy.pdf}
		\vspace{\captionspace}
		\caption{Eidolon III}
	\end{subfigure}\hfill
	\begin{subfigure}{\figwidth}
		\centering        \includegraphics[width=\linewidth]{eidolonIII_entropy.pdf}
		\vspace{\captionspace}
		\caption*{}
	\end{subfigure}\hfill
	\begin{subfigure}{\figwidth}
		\centering
		\includegraphics[width=\linewidth]{rotation_OOD-accuracy.pdf}
		\vspace{\captionspace}
		\caption{Rotation}
	\end{subfigure}\hfill
	\begin{subfigure}{\figwidth}
		\centering
		\includegraphics[width=\linewidth]{rotation_entropy.pdf}
		\vspace{\captionspace}
		\caption*{}
	\end{subfigure}\hfill
	\caption{Noise generalisation results.}
	\label{fig:results_accuracy_entropy}
\end{figure}

================================================
FILE: latex-report/assets/nonparametric_accuracy.tex
================================================
\begin{figure}[h]
	\begin{subfigure}{0.49\linewidth}
		\centering
		\includegraphics[width=\linewidth]{sketch_OOD-accuracy.pdf}
		\caption{Accuracy on `sketch' images.}
		\label{subfig:accuracy_sketch}
		\vspace{\captionspaceII}
	\end{subfigure}\hfill
	\begin{subfigure}{0.49\linewidth}
		\centering
		\includegraphics[width=\linewidth]{edge_OOD-accuracy.pdf}
		\caption{Accuracy on `edge' images.}
		\label{subfig:accuracy_edge}
		\vspace{\captionspaceII}
	\end{subfigure}\hfill
	\begin{subfigure}{0.49\linewidth}
		\centering
		\includegraphics[width=\linewidth]{silhouette_OOD-accuracy.pdf}
		\caption{Accuracy on `silhouette' images.}
		\label{subfig:accuracy_silhouette}
		\vspace{\captionspaceII}
	\end{subfigure}\hfill
	\begin{subfigure}{0.49\linewidth}
		\centering
		\includegraphics[width=\linewidth]{stylized_OOD-accuracy.pdf}
		\caption{Accuracy on `stylized' images.}			
		\label{subfig:accuracy_stylized}
		\vspace{\captionspaceII}
	\end{subfigure}\hfill
	\caption{OOD accuracy on four nonparametric datasets (i.e., datasets with only a single corruption type and strength).}
	\label{fig:results_accuracy_nonparametric}
\end{figure}


================================================
FILE: latex-report/assets/shape_bias.tex
================================================
\begin{figure}[h]
	\includegraphics[width=\linewidth]{cue-conflict_shape-bias_matrixplot.pdf}
	\caption{Shape vs.\ texture bias: category-level plot.}
\end{figure}\hfill


\begin{figure}[h]
	\includegraphics[width=\linewidth]{cue-conflict_shape-bias_boxplot.pdf}
	\caption{Shape vs.\ texture bias: boxplot.}
\end{figure}\hfill


================================================
FILE: latex-report/neurips.sty
================================================
% partial rewrite of the LaTeX2e package for submissions to the
% Conference on Neural Information Processing Systems (NeurIPS):
%
% - uses more LaTeX conventions
% - line numbers at submission time replaced with aligned numbers from
%   lineno package
% - \nipsfinalcopy replaced with [final] package option
% - automatically loads times package for authors
% - loads natbib automatically; this can be suppressed with the
%   [nonatbib] package option
% - adds foot line to first page identifying the conference
% - adds preprint option for submission to e.g. arXiv
% - conference acronym modified
%
% Roman Garnett (garnett@wustl.edu) and the many authors of
% nips15submit_e.sty, including MK and drstrip@sandia
%
% last revision: January 2020

\NeedsTeXFormat{LaTeX2e}
\ProvidesPackage{neurips}[NeurIPS style file]

% declare final option, which creates camera-ready copy
\newif\if@neuripsfinal\@neuripsfinalfalse
\DeclareOption{final}{
  \@neuripsfinaltrue
}

% declare nonatbib option, which does not load natbib in case of
% package clash (users can pass options to natbib via
% \PassOptionsToPackage)
\newif\if@natbib\@natbibtrue
\DeclareOption{nonatbib}{
  \@natbibfalse
}

% declare preprint option, which creates a preprint version ready for
% upload to, e.g., arXiv
\newif\if@preprint\@preprintfalse
\DeclareOption{preprint}{
  \@preprinttrue
}

\ProcessOptions\relax

% determine whether this is an anonymized submission
\newif\if@submission\@submissiontrue
\if@neuripsfinal\@submissionfalse\fi
\if@preprint\@submissionfalse\fi

% fonts
\renewcommand{\rmdefault}{ptm}
\renewcommand{\sfdefault}{phv}

% change this every year for notice string at bottom
\newcommand{\@neuripsordinal}{34th}
\newcommand{\@neuripsyear}{2020}
\newcommand{\@neuripslocation}{Vancouver, Canada}

% acknowledgments
\usepackage{environ}
\newcommand{\acksection}{\section*{Acknowledgments and Disclosure of Funding}}
\NewEnviron{ack}{%
  \acksection
  \BODY
}

% handle tweaks for camera-ready copy vs. submission copy
\if@preprint
  \newcommand{\@noticestring}{%
    Generalisation report.%
  }
\else
  \if@neuripsfinal
    \newcommand{\@noticestring}{%
      \@neuripsordinal\/ Conference on Neural Information Processing Systems
      (NeurIPS \@neuripsyear), \@neuripslocation.%
    }
  \else
    \newcommand{\@noticestring}{%
      Generalisation report.%
    }

    % hide the acknowledgements
    \NewEnviron{hide}{}
    \let\ack\hide
    \let\endack\endhide
    
    % line numbers for submission
    \RequirePackage{lineno}
    \linenumbers

    % fix incompatibilities between lineno and amsmath, if required, by
    % transparently wrapping linenomath environments around amsmath
    % environments
    \AtBeginDocument{%
      \@ifpackageloaded{amsmath}{%
        \newcommand*\patchAmsMathEnvironmentForLineno[1]{%
          \expandafter\let\csname old#1\expandafter\endcsname\csname #1\endcsname
          \expandafter\let\csname oldend#1\expandafter\endcsname\csname end#1\endcsname
          \renewenvironment{#1}%
                           {\linenomath\csname old#1\endcsname}%
                           {\csname oldend#1\endcsname\endlinenomath}%
        }%
        \newcommand*\patchBothAmsMathEnvironmentsForLineno[1]{%
          \patchAmsMathEnvironmentForLineno{#1}%
          \patchAmsMathEnvironmentForLineno{#1*}%
        }%
        \patchBothAmsMathEnvironmentsForLineno{equation}%
        \patchBothAmsMathEnvironmentsForLineno{align}%
        \patchBothAmsMathEnvironmentsForLineno{flalign}%
        \patchBothAmsMathEnvironmentsForLineno{alignat}%
        \patchBothAmsMathEnvironmentsForLineno{gather}%
        \patchBothAmsMathEnvironmentsForLineno{multline}%
      }{}
    }
  \fi
\fi

% load natbib unless told otherwise
\if@natbib
  \RequirePackage{natbib}
\fi

% set page geometry
\usepackage[verbose=true,letterpaper]{geometry}
\AtBeginDocument{
  \newgeometry{
    textheight=9in,
    textwidth=5.5in,
    top=1in,
    headheight=12pt,
    headsep=25pt,
    footskip=30pt
  }
  \@ifpackageloaded{fullpage}
    {\PackageWarning{neurips}{fullpage package not allowed! Overwriting formatting.}}
    {}
}

\widowpenalty=10000
\clubpenalty=10000
\flushbottom
\sloppy

% font sizes with reduced leading
\renewcommand{\normalsize}{%
  \@setfontsize\normalsize\@xpt\@xipt
  \abovedisplayskip      7\p@ \@plus 2\p@ \@minus 5\p@
  \abovedisplayshortskip \z@ \@plus 3\p@
  \belowdisplayskip      \abovedisplayskip
  \belowdisplayshortskip 4\p@ \@plus 3\p@ \@minus 3\p@
}
\normalsize
\renewcommand{\small}{%
  \@setfontsize\small\@ixpt\@xpt
  \abovedisplayskip      6\p@ \@plus 1.5\p@ \@minus 4\p@
  \abovedisplayshortskip \z@  \@plus 2\p@
  \belowdisplayskip      \abovedisplayskip
  \belowdisplayshortskip 3\p@ \@plus 2\p@   \@minus 2\p@
}
\renewcommand{\footnotesize}{\@setfontsize\footnotesize\@ixpt\@xpt}
\renewcommand{\scriptsize}{\@setfontsize\scriptsize\@viipt\@viiipt}
\renewcommand{\tiny}{\@setfontsize\tiny\@vipt\@viipt}
\renewcommand{\large}{\@setfontsize\large\@xiipt{14}}
\renewcommand{\Large}{\@setfontsize\Large\@xivpt{16}}
\renewcommand{\LARGE}{\@setfontsize\LARGE\@xviipt{20}}
\renewcommand{\huge}{\@setfontsize\huge\@xxpt{23}}
\renewcommand{\Huge}{\@setfontsize\Huge\@xxvpt{28}}

% sections with less space
\providecommand{\section}{}
\renewcommand{\section}{%
  \@startsection{section}{1}{\z@}%
                {-2.0ex \@plus -0.5ex \@minus -0.2ex}%
                { 1.5ex \@plus  0.3ex \@minus  0.2ex}%
                {\large\bf\raggedright}%
}
\providecommand{\subsection}{}
\renewcommand{\subsection}{%
  \@startsection{subsection}{2}{\z@}%
                {-1.8ex \@plus -0.5ex \@minus -0.2ex}%
                { 0.8ex \@plus  0.2ex}%
                {\normalsize\bf\raggedright}%
}
\providecommand{\subsubsection}{}
\renewcommand{\subsubsection}{%
  \@startsection{subsubsection}{3}{\z@}%
                {-1.5ex \@plus -0.5ex \@minus -0.2ex}%
                { 0.5ex \@plus  0.2ex}%
                {\normalsize\bf\raggedright}%
}
\providecommand{\paragraph}{}
\renewcommand{\paragraph}{%
  \@startsection{paragraph}{4}{\z@}%
                {1.5ex \@plus 0.5ex \@minus 0.2ex}%
                {-1em}%
                {\normalsize\bf}%
}
\providecommand{\subparagraph}{}
\renewcommand{\subparagraph}{%
  \@startsection{subparagraph}{5}{\z@}%
                {1.5ex \@plus 0.5ex \@minus 0.2ex}%
                {-1em}%
                {\normalsize\bf}%
}
\providecommand{\subsubsubsection}{}
\renewcommand{\subsubsubsection}{%
  \vskip5pt{\noindent\normalsize\rm\raggedright}%
}

% float placement
\renewcommand{\topfraction      }{0.85}
\renewcommand{\bottomfraction   }{0.4}
\renewcommand{\textfraction     }{0.1}
\renewcommand{\floatpagefraction}{0.7}

\newlength{\@neuripsabovecaptionskip}\setlength{\@neuripsabovecaptionskip}{7\p@}
\newlength{\@neuripsbelowcaptionskip}\setlength{\@neuripsbelowcaptionskip}{\z@}

\setlength{\abovecaptionskip}{\@neuripsabovecaptionskip}
\setlength{\belowcaptionskip}{\@neuripsbelowcaptionskip}

% swap above/belowcaptionskip lengths for tables
\renewenvironment{table}
  {\setlength{\abovecaptionskip}{\@neuripsbelowcaptionskip}%
   \setlength{\belowcaptionskip}{\@neuripsabovecaptionskip}%
   \@float{table}}
  {\end@float}

% footnote formatting
% \footnotesep: vertical gap between individual footnotes;
% \skip\footins: stretchy gap between body text and the footnote block;
% \footnoterule: 12pc separator rule, kerned so it adds no net height.
\setlength{\footnotesep }{6.65\p@}
\setlength{\skip\footins}{9\p@ \@plus 4\p@ \@minus 2\p@}
\renewcommand{\footnoterule}{\kern-3\p@ \hrule width 12pc \kern 2.6\p@}
\setcounter{footnote}{0}

% paragraph formatting
% No first-line indent; paragraphs separated by 5.5pt of vertical space.
\setlength{\parindent}{\z@}
\setlength{\parskip  }{5.5\p@}

% list formatting
% Tighten the default list spacing and set per-level left margins.
\setlength{\topsep       }{4\p@ \@plus 1\p@   \@minus 2\p@}
\setlength{\partopsep    }{1\p@ \@plus 0.5\p@ \@minus 0.5\p@}
\setlength{\itemsep      }{2\p@ \@plus 1\p@   \@minus 0.5\p@}
\setlength{\parsep       }{2\p@ \@plus 1\p@   \@minus 0.5\p@}
% NOTE(review): \leftmargin is normally set inside list environments;
% setting it at top level here presumably only seeds \leftmargini on
% the next line — confirm against the base class.
\setlength{\leftmargin   }{3pc}
\setlength{\leftmargini  }{\leftmargin}
\setlength{\leftmarginii }{2em}
\setlength{\leftmarginiii}{1.5em}
\setlength{\leftmarginiv }{1.0em}
\setlength{\leftmarginv  }{0.5em}
% \@listi..\@listvi: per-nesting-level list parameters. Deeper levels
% get progressively smaller margins and tighter vertical spacing;
% \labelwidth is the margin minus \labelsep at each level.
\def\@listi  {\leftmargin\leftmargini}
\def\@listii {\leftmargin\leftmarginii
              \labelwidth\leftmarginii
              \advance\labelwidth-\labelsep
              \topsep  2\p@ \@plus 1\p@    \@minus 0.5\p@
              \parsep  1\p@ \@plus 0.5\p@ \@minus 0.5\p@
              \itemsep \parsep}
\def\@listiii{\leftmargin\leftmarginiii
              \labelwidth\leftmarginiii
              \advance\labelwidth-\labelsep
              \topsep    1\p@ \@plus 0.5\p@ \@minus 0.5\p@
              \parsep    \z@
              \partopsep 0.5\p@ \@plus 0\p@ \@minus 0.5\p@
              \itemsep \topsep}
\def\@listiv {\leftmargin\leftmarginiv
              \labelwidth\leftmarginiv
              \advance\labelwidth-\labelsep}
\def\@listv  {\leftmargin\leftmarginv
              \labelwidth\leftmarginv
              \advance\labelwidth-\labelsep}
% NOTE(review): \@listvi references \leftmarginvi, which is not set
% above (only levels i-v are) — it will use whatever the base class
% left in it; confirm this is intended.
\def\@listvi {\leftmargin\leftmarginvi
              \labelwidth\leftmarginvi
              \advance\labelwidth-\labelsep}

% create title
% Builds the title page: symbolic footnote marks for author thanks,
% custom footnote-mark layout, then the title box (\@maketitle),
% \thanks footnotes, and the conference notice (\@notice). All
% redefinitions are scoped inside \begingroup...\endgroup. Afterwards
% \maketitle and \thanks are disabled so they cannot run twice.
\providecommand{\maketitle}{}
\renewcommand{\maketitle}{%
  \par
  \begingroup
    \renewcommand{\thefootnote}{\fnsymbol{footnote}}
    % for perfect author name centering
    \renewcommand{\@makefnmark}{\hbox to \z@{$^{\@thefnmark}$\hss}}
    % The footnote-mark was overlapping the footnote-text,
    % added the following to fix this problem               (MK)
    \long\def\@makefntext##1{%
      \parindent 1em\noindent
      \hbox to 1.8em{\hss $\m@th ^{\@thefnmark}$}##1
    }
    \thispagestyle{empty}
    \@maketitle
    \@thanks
    \@notice
  \endgroup
  \let\maketitle\relax
  \let\thanks\relax
}

% rules for title box at top of first page
% \@toptitlebar: thick (4pt) rule above the title; the negative
% \vskip cancels the \parskip that would otherwise add extra space.
\newcommand{\@toptitlebar}{
  \hrule height 4\p@
  \vskip 0.25in
  \vskip -\parskip%
}
% \@bottomtitlebar: thin (1pt) rule below the title, again with the
% \parskip compensation.
\newcommand{\@bottomtitlebar}{
  \vskip 0.29in
  \vskip -\parskip
  \hrule height 1\p@
  \vskip 0.09in%
}

% create title (includes both anonymized and non-anonymized versions)
% Typesets the title box: rules, centered title, then either an
% "Anonymous Author(s)" placeholder (submission mode, \if@submission)
% or the real author block. \And starts a new author column on the
% same line if it fits (\linebreak[0]); \AND forces a new author row
% (\linebreak[4]). The \rule{\z@}{24\p@} is an invisible strut that
% fixes each author column's height.
\providecommand{\@maketitle}{}
\renewcommand{\@maketitle}{%
  \vbox{%
    \hsize\textwidth
    \linewidth\hsize
    \vskip 0.1in
    \@toptitlebar
    \centering
    {\LARGE\bf \@title\par}
    \@bottomtitlebar
    \if@submission
      \begin{tabular}[t]{c}\bf\rule{\z@}{24\p@}
        Anonymous Author(s) \\
        Affiliation \\
        Address \\
        \texttt{email} \\
      \end{tabular}%
    \else
      \def\And{%
        \end{tabular}\hfil\linebreak[0]\hfil%
        \begin{tabular}[t]{c}\bf\rule{\z@}{24\p@}\ignorespaces%
      }
      \def\AND{%
        \end{tabular}\hfil\linebreak[4]\hfil%
        \begin{tabular}[t]{c}\bf\rule{\z@}{24\p@}\ignorespaces%
      }
      \begin{tabular}[t]{c}\bf\rule{\z@}{24\p@}\@author\end{tabular}%
    \fi
    \vskip 0.3in \@minus 0.1in
  }
}

% add conference notice to bottom of first page
% Declares a "noticebox" float type (\ftype@noticebox) and places
% \@noticestring as a bottom float on the current page, enlarging the
% page by two baselines to give that room back to the authors.
\newcommand{\ftype@noticebox}{8}
\newcommand{\@notice}{%
  % give a bit of extra room back to authors on first page
  \enlargethispage{2\baselineskip}%
  \@float{noticebox}[b]%
    \footnotesize\@noticestring%
  \end@float%
}

% abstract styling
% Centered bold "Abstract" heading followed by the abstract body set
% inside a quote environment (i.e. indented on both sides).
\renewenvironment{abstract}%
{%
  \vskip 0.075in%
  \centerline%
  {\large\bf Abstract}%
  \vspace{0.5ex}%
  \begin{quote}%
}
{
  \par%
  \end{quote}%
  \vskip 1ex%
}

\endinput


================================================
FILE: latex-report/report.tex
================================================
% Generalisation report: pulls in the benchmark figures and tables
% generated by model-vs-human (under assets/) into one document.
\documentclass[]{article}

% neurips style in non-natbib preprint mode (no submission notice).
\usepackage[nonatbib,preprint]{neurips}

\usepackage[utf8]{inputenc} % allow utf-8 input
\usepackage[T1]{fontenc}    % use 8-bit T1 fonts
\usepackage{hyperref}       % hyperlinks
\usepackage{url}            % simple URL typesetting
\usepackage{booktabs}       % professional-quality tables
\usepackage{amsfonts}       % blackboard math symbols
\usepackage{nicefrac}       % compact symbols for 1/2, etc.
\usepackage{microtype}      % microtypography
\usepackage{graphicx}
\usepackage{caption}
\usepackage{subcaption}
\usepackage{pgffor} % for loop
\usepackage{xstring} % for declaring lists



% figures are produced by the toolbox into ../figures/example-figures/
\graphicspath{{../figures/example-figures/}}


\title{Generalisation report}

\author{%
	Generalisation report produced by \href{https://github.com/bethgelab/model-vs-human}{model-vs-human}\\
}



\begin{document}

% layout knobs shared by the asset files included below
\newcommand{\figwidth}{0.24\textwidth}
\newcommand{\captionspace}{-1.5\baselineskip}
\newcommand{\captionspaceII}{0.6\baselineskip}
\newcommand{\captionspaceBenchmark}{-0.5\baselineskip}


\maketitle

%\begin{abstract}
%\end{abstract}


% auto-generated report sections (one file per analysis)
\input{assets/benchmark_figures.tex}
\input{assets/benchmark_tables.tex}
\input{assets/error_consistency_lineplots.tex}
\input{assets/shape_bias.tex}
\input{assets/nonparametric_accuracy.tex}
\input{assets/consistency_vs_accuracy.tex}
\input{assets/error_consistency_matrices.tex}



\end{document}


================================================
FILE: licenses/CODE_LICENSE
================================================
MIT License

Copyright (c) Robert Geirhos 2021

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


================================================
FILE: licenses/LICENSES_OVERVIEW.md
================================================
# Overview of licenses

## Dataset licenses

### ImageNet-based datasets

colour, contrast, eidolonI, eidolonII, eidolonIII, false-colour, high-pass, low-pass, phase-scrambling, power-equalisation, rotation, uniform-noise: These datasets are based on the [generalisation-humans-DNNs repository](https://github.com/rgeirhos/generalisation-humans-DNNs/) by [Robert Geirhos](https://github.com/rgeirhos/), which itself is based on ImageNet. Therefore, please read the [ImageNet license](https://image-net.org/download.php).

stylized: This dataset is based on the [texture-vs-shape repository](https://github.com/rgeirhos/texture-vs-shape) by [Robert Geirhos](https://github.com/rgeirhos/) and on ImageNet. Therefore, please read the [ImageNet license](https://image-net.org/download.php).

### Texture-shape-based datasets

cue-conflict, edge, silhouette: These datasets are based on the [texture-vs-shape repository](https://github.com/rgeirhos/texture-vs-shape) and licensed under a [CC-BY-4.0 license](https://creativecommons.org/licenses/by/4.0/) according to the [repository](https://github.com/rgeirhos/texture-vs-shape/blob/master/DATASET_LICENSE).

### ImageNet-Sketch-based datasets

sketch: This dataset is based on the [ImageNet-Sketch repository](https://github.com/HaohanWang/ImageNet-Sketch) by [Haohan Wang](https://github.com/HaohanWang), which is licensed under an [MIT license](https://spdx.org/licenses/MIT.html) according to the [repository](https://github.com/HaohanWang/ImageNet-Sketch/blob/master/LICENSE).

## Code

### Our code
Our own code is licensed under a MIT license. The license can be found in ``CODE_LICENSE``.

### Model licenses
The models from our model zoo were obtained from various sources. Licenses can be found in ``MODEL_LICENSES``.


================================================
FILE: licenses/MODEL_LICENSES
================================================
-----------------LICENSE for TensorFlow Hub models ---------------
TensorFlow Hub models were obtained from https://github.com/tensorflow/hub 
which has the following license:


                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!)  The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.


--------------------------LICENSE for SWSL models --------------------------------
SWSL models were obtained from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models 
which has the following license:

Attribution-NonCommercial 4.0 International

=======================================================================

Creative Commons Corporation ("Creative Commons") is not a law firm and
does not provide legal services or legal advice. Distribution of
Creative Commons public licenses does not create a lawyer-client or
other relationship. Creative Commons makes its licenses and related
information available on an "as-is" basis. Creative Commons gives no
warranties regarding its licenses, any material licensed under their
terms and conditions, or any related information. Creative Commons
disclaims all liability for damages resulting from their use to the
fullest extent possible.

Using Creative Commons Public Licenses

Creative Commons public licenses provide a standard set of terms and
conditions that creators and other rights holders may use to share
original works of authorship and other material subject to copyright
and certain other rights specified in the public license below. The
following considerations are for informational purposes only, are not
exhaustive, and do not form part of our licenses.

     Considerations for licensors: Our public licenses are
     intended for use by those authorized to give the public
     permission to use material in ways otherwise restricted by
     copyright and certain other rights. Our licenses are
     irrevocable. Licensors should read and understand the terms
     and conditions of the license they choose before applying it.
     Licensors should also secure all rights necessary before
     applying our licenses so that the public can reuse the
     material as expected. Licensors should clearly mark any
     material not subject to the license. This includes other CC-
     licensed material, or material used under an exception or
     limitation to copyright. More considerations for licensors:
  wiki.creativecommons.org/Considerations_for_licensors

     Considerations for the public: By using one of our public
     licenses, a licensor grants the public permission to use the
     licensed material under specified terms and conditions. If
     the licensor's permission is not necessary for any reason--for
     example, because of any applicable exception or limitation to
     copyright--then that use is not regulated by the license. Our
     licenses grant only permissions under copyright and certain
     other rights that a licensor has authority to grant. Use of
     the licensed material may still be restricted for other
     reasons, including because others have copyright or other
     rights in the material. A licensor may make special requests,
     such as asking that all changes be marked or described.
     Although not required by our licenses, you are encouraged to
     respect those requests where reasonable. More_considerations
     for the public: 
  wiki.creativecommons.org/Considerations_for_licensees

=======================================================================

Creative Commons Attribution-NonCommercial 4.0 International Public
License

By exercising the Licensed Rights (defined below), You accept and agree
to be bound by the terms and conditions of this Creative Commons
Attribution-NonCommercial 4.0 International Public License ("Public
License"). To the extent this Public License may be interpreted as a
contract, You are granted the Licensed Rights in consideration of Your
acceptance of these terms and conditions, and the Licensor grants You
such rights in consideration of benefits the Licensor receives from
making the Licensed Material available under these terms and
conditions.

Section 1 -- Definitions.

  a. Adapted Material means material subject to Copyright and Similar
     Rights that is derived from or based upon the Licensed Material
     and in which the Licensed Material is translated, altered,
     arranged, transformed, or otherwise modified in a manner requiring
     permission under the Copyright and Similar Rights held by the
     Licensor. For purposes of this Public License, where the Licensed
     Material is a musical work, performance, or sound recording,
     Adapted Material is always produced where the Licensed Material is
     synched in timed relation with a moving image.

  b. Adapter's License means the license You apply to Your Copyright
     and Similar Rights in Your contributions to Adapted Material in
     accordance with the terms and conditions of this Public License.

  c. Copyright and Similar Rights means copyright and/or similar rights
     closely related to copyright including, without limitation,
     performance, broadcast, sound recording, and Sui Generis Database
     Rights, without regard to how the rights are labeled or
     categorized. For purposes of this Public License, the rights
     specified in Section 2(b)(1)-(2) are not Copyright and Similar
     Rights.
  d. Effective Technological Measures means those measures that, in the
     absence of proper authority, may not be circumvented under laws
     fulfilling obligations under Article 11 of the WIPO Copyright
     Treaty adopted on December 20, 1996, and/or similar international
     agreements.

  e. Exceptions and Limitations means fair use, fair dealing, and/or
     any other exception or limitation to Copyright and Similar Rights
     that applies to Your use of the Licensed Material.

  f. Licensed Material means the artistic or literary work, database,
     or other material to which the Licensor applied this Public
     License.

  g. Licensed Rights means the rights granted to You subject to the
     terms and conditions of this Public License, which are limited to
     all Copyright and Similar Rights that apply to Your use of the
     Licensed Material and that the Licensor has authority to license.

  h. Licensor means the individual(s) or entity(ies) granting rights
     under this Public License.

  i. NonCommercial means not primarily intended for or directed towards
     commercial advantage or monetary compensation. For purposes of
     this Public License, the exchange of the Licensed Material for
     other material subject to Copyright and Similar Rights by digital
     file-sharing or similar means is NonCommercial provided there is
     no payment of monetary compensation in connection with the
     exchange.

  j. Share means to provide material to the public by any means or
     process that requires permission under the Licensed Rights, such
     as reproduction, public display, public performance, distribution,
     dissemination, communication, or importation, and to make material
     available to the public including in ways that members of the
     public may access the material from a place and at a time
     individually chosen by them.

  k. Sui Generis Database Rights means rights other than copyright
     resulting from Directive 96/9/EC of the European Parliament and of
     the Council of 11 March 1996 on the legal protection of databases,
     as amended and/or succeeded, as well as other essentially
     equivalent rights anywhere in the world.

  l. You means the individual or entity exercising the Licensed Rights
     under this Public License. Your has a corresponding meaning.

Section 2 -- Scope.

  a. License grant.

       1. Subject to the terms and conditions of this Public License,
          the Licensor hereby grants You a worldwide, royalty-free,
          non-sublicensable, non-exclusive, irrevocable license to
          exercise the Licensed Rights in the Licensed Material to:

            a. reproduce and Share the Licensed Material, in whole or
               in part, for NonCommercial purposes only; and

            b. produce, reproduce, and Share Adapted Material for
               NonCommercial purposes only.

       2. Exceptions and Limitations. For the avoidance of doubt, where
          Exceptions and Limitations apply to Your use, this Public
          License does not apply, and You do not need to comply with
          its terms and conditions.

       3. Term. The term of this Public License is specified in Section
          6(a).

       4. Media and formats; technical modifications allowed. The
          Licensor authorizes You to exercise the Licensed Rights in
          all media and formats whether now known or hereafter created,
          and to make technical modifications necessary to do so. The
          Licensor waives and/or agrees not to assert any right or
          authority to forbid You from making technical modifications
          necessary to exercise the Licensed Rights, including
          technical modifications necessary to circumvent Effective
          Technological Measures. For purposes of this Public License,
          simply making modifications authorized by this Section 2(a)
          (4) never produces Adapted Material.

       5. Downstream recipients.

            a. Offer from the Licensor -- Licensed Material. Every
               recipient of the Licensed Material automatically
               receives an offer from the Licensor to exercise the
               Licensed Rights under the terms and conditions of this
               Public License.

            b. No downstream restrictions. You may not offer or impose
               any additional or different terms or conditions on, or
               apply any Effective Technological Measures to, the
               Licensed Material if doing so restricts exercise of the
               Licensed Rights by any recipient of the Licensed
               Material.

       6. No endorsement. Nothing in this Public License constitutes or
          may be construed as permission to assert or imply that You
          are, or that Your use of the Licensed Material is, connected
          with, or sponsored, endorsed, or granted official status by,
          the Licensor or others designated to receive attribution as
          provided in Section 3(a)(1)(A)(i).

  b. Other rights.

       1. Moral rights, such as the right of integrity, are not
          licensed under this Public License, nor are publicity,
          privacy, and/or other similar personality rights; however, to
          the extent possible, the Licensor waives and/or agrees not to
          assert any such rights held by the Licensor to the limited
          extent necessary to allow You to exercise the Licensed
          Rights, but not otherwise.

       2. Patent and trademark rights are not licensed under this
          Public License.

       3. To the extent possible, the Licensor waives any right to
          collect royalties from You for the exercise of the Licensed
          Rights, whether directly or through a collecting society
          under any voluntary or waivable statutory or compulsory
          licensing scheme. In all other cases the Licensor expressly
          reserves any right to collect such royalties, including when
          the Licensed Material is used other than for NonCommercial
          purposes.

Section 3 -- License Conditions.

Your exercise of the Licensed Rights is expressly made subject to the
following conditions.

  a. Attribution.

       1. If You Share the Licensed Material (including in modified
          form), You must:

            a. retain the following if it is supplied by the Licensor
               with the Licensed Material:

                 i. identification of the creator(s) of the Licensed
                    Material and any others designated to receive
                    attribution, in any reasonable manner requested by
                    the Licensor (including by pseudonym if
                    designated);

                ii. a copyright notice;

               iii. a notice that refers to this Public License;

                iv. a notice that refers to the disclaimer of
                    warranties;

                 v. a URI or hyperlink to the Licensed Material to the
                    extent reasonably practicable;

            b. indicate if You modified the Licensed Material and
               retain an indication of any previous modifications; and

            c. indicate the Licensed Material is licensed under this
               Public License, and include the text of, or the URI or
               hyperlink to, this Public License.

       2. You may satisfy the conditions in Section 3(a)(1) in any
          reasonable manner based on the medium, means, and context in
          which You Share the Licensed Material. For example, it may be
          reasonable to satisfy the conditions by providing a URI or
          hyperlink to a resource that includes the required
          information.

       3. If requested by the Licensor, You must remove any of the
          information required by Section 3(a)(1)(A) to the extent
          reasonably practicable.

       4. If You Share Adapted Material You produce, the Adapter's
          License You apply must not prevent recipients of the Adapted
          Material from complying with this Public License.

Section 4 -- Sui Generis Database Rights.

Where the Licensed Rights include Sui Generis Database Rights that
apply to Your use of the Licensed Material:

  a. for the avoidance of doubt, Section 2(a)(1) grants You the right
     to extract, reuse, reproduce, and Share all or a substantial
     portion of the contents of the database for NonCommercial purposes
     only;

  b. if You include all or a substantial portion of the database
     contents in a database in which You have Sui Generis Database
     Rights, then the database in which You have Sui Generis Database
     Rights (but not its individual contents) is Adapted Material; and

  c. You must comply with the conditions in Section 3(a) if You Share
     all or a substantial portion of the contents of the database.

For the avoidance of doubt, this Section 4 supplements and does not
replace Your obligations under this Public License where the Licensed
Rights include other Copyright and Similar Rights.

Section 5 -- Disclaimer of Warranties and Limitation of Liability.

  a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
     EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
     AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
     ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
     IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
     WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
     PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
     ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
     KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
     ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.

  b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
     TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
     NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
     INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
     COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
     USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
     ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
     DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
     IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.

  c. The disclaimer of warranties and limitation of liability provided
     above shall be interpreted in a manner that, to the extent
     possible, most closely approximates an absolute disclaimer and
     waiver of all liability.

Section 6 -- Term and Termination.

  a. This Public License applies for the term of the Copyright and
     Similar Rights licensed here. However, if You fail to comply with
     this Public License, then Your rights under this Public License
     terminate automatically.

  b. Where Your right to use the Licensed Material has terminated under
     Section 6(a), it reinstates:

       1. automatically as of the date the violation is cured, provided
          it is cured within 30 days of Your discovery of the
          violation; or

       2. upon express reinstatement by the Licensor.

     For the avoidance of doubt, this Section 6(b) does not affect any
     right the Licensor may have to seek remedies for Your violations
     of this Public License.

  c. For the avoidance of doubt, the Licensor may also offer the
     Licensed Material under separate terms or conditions or stop
     distributing the Licensed Material at any time; however, doing so
     will not terminate this Public License.

  d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
     License.

Section 7 -- Other Terms and Conditions.

  a. The Licensor shall not be bound by any additional or different
     terms or conditions communicated by You unless expressly agreed.

  b. Any arrangements, understandings, or agreements regarding the
     Licensed Material not stated herein are separate from and
     independent of the terms and conditions of this Public License.

Section 8 -- Interpretation.

  a. For the avoidance of doubt, this Public License does not, and
     shall not be interpreted to, reduce, limit, restrict, or impose
     conditions on any use of the Licensed Material that could lawfully
     be made without permission under this Public License.

  b. To the extent possible, if any provision of this Public License is
     deemed unenforceable, it shall be automatically reformed to the
     minimum extent necessary to make it enforceable. If the provision
     cannot be reformed, it shall be severed from this Public License
     without affecting the enforceability of the remaining terms and
     conditions.

  c. No term or condition of this Public License will be waived and no
     failure to comply consented to unless expressly agreed to by the
     Licensor.

  d. Nothing in this Public License constitutes or may be interpreted
     as a limitation upon, or waiver of, any privileges and immunities
     that apply to the Licensor or You, including from the legal
     processes of any jurisdiction or authority.

=======================================================================

Creative Commons is not a party to its public
licenses. Notwithstanding, Creative Commons may elect to apply one of
its public licenses to material it publishes and in those instances
will be considered the “Licensor.” The text of the Creative Commons
public licenses is dedicated to the public domain under the CC0 Public
Domain Dedication. Except for the limited purpose of indicating that
material is shared under a Creative Commons public license or as
otherwise permitted by the Creative Commons policies published at
creativecommons.org/policies, Creative Commons does not authorize the
use of the trademark "Creative Commons" or any other trademark or logo
of Creative Commons without its prior written consent including,
without limitation, in connection with any unauthorized modifications
to any of its public licenses or any other arrangements,
understandings, or agreements concerning use of licensed material. For
the avoidance of doubt, this paragraph does not form part of the
public licenses.

Creative Commons may be contacted at creativecommons.org.


----------------------LICENSE for vision transformer models -------------------
ViT-L and ViT-B were obtained from https://github.com/lukemelas/PyTorch-Pretrained-ViT.
Other transformers were obtained from https://github.com/rwightman/pytorch-image-models (see below).

---------------------------LICENSE for BagNet models---------------------------
The BagNet models were obtained from https://github.com/wielandbrendel/bag-of-local-features-models 
which has the following license:

MIT License

Copyright (c) 2019 Wieland Brendel

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, 
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.



---------------------------LICENSE for adversarially robust models---------------------------
The adversarially robust models were obtained from https://github.com/sacadena/ptrnets,
whose authors adapted https://github.com/microsoft/robust-models-transfer, which has the following license:

   MIT License

    Copyright (c) Microsoft Corporation.

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to deal
    in the Software without restriction, including without limitation the rights
    to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be included in all
    copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, 
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 

IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, 
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


---------------------------LICENSE for 'ShapeNet' models---------------------------
The 'ShapeNet' models were obtained from https://github.com/rgeirhos/texture-vs-shape.

---------------------------LICENSE for SimCLR models---------------------------
Supervised baseline models trained with SimCLR augmentations were kindly provided by Katherine L. Hermann.

The SimCLR models were obtained from https://github.com/sacadena/ptrnets,
whose authors adapted https://github.com/tonylins/simclr-converter.git, which has the following license:

                                Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!)  The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [2020] [Ji Lin]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.



--------------------------LICENSE for PyContrast models --------------------------------

The PyContrast models were obtained from https://github.com/HobbitLong/PyContrast,
which has the following license:

Copyright (c) 2020, Yonglong Tian
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


-------------------- LICENSE FOR pytorch-CycleGAN-and-pix2pix ------------------
BSD License

Copyright (c) 2017, Jun-Yan Zhu and Taesung Park
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.


---------------------------- LICENSE FOR ResNeSt -------------------------------
Apache License

Copyright 2020, Hang Zhang et. al.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


---------------------------- LICENSE FOR ResNet --------------------------------

BSD 3-Clause License

Copyright (c) Soumith Chintala 2016,
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

* Neither the name of the copyright holder nor the names of its
  contributors may be used to endorse or promote products derived from
  this software without specific prior written permission.



---------------------------LICENSE for Pytorch-image-models ---------------------------

Pytorch-image-models were obtained from https://github.com/rwightman/pytorch-image-models/ 
which has the following license: 

                            Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "{}"
      replaced with your own identifying information. (Don't include
      the brackets!)  The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright 2019 Ross Wightman

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.



--------------------------------LICENSE for CLIP model -----------------------------------

The CLIP model was obtained from https://github.com/openai/CLIP, which has the following license:

MIT License

Copyright (c) 2021 OpenAI

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, 
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.

IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, 
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH
THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


================================================
FILE: modelvshuman/__init__.py
================================================
from . import cli
from . import datasets
from . import evaluation
from . import models
from . import plotting
from .model_evaluator import ModelEvaluator
from .plotting.plot import plot
from .version import __version__, VERSION

# Convenience aliases exposed at the package level.
Evaluate = ModelEvaluator
Plot = plot


================================================
FILE: modelvshuman/cli.py
================================================
#!/usr/bin/env python3

import logging

import click

from .datasets import list_datasets
from .models import list_models

logger = logging.getLogger(__name__)

# Model and dataset names offered as CLI choices (queried once at import time).
supported_models = list_models("tensorflow") + list_models("pytorch")
supported_datasets = list_datasets()

# Blank line to visually separate framework start-up output from CLI output.
print("")


@click.command()
@click.option("--models", "-m",
              type=click.Choice(supported_models, case_sensitive=True),
              multiple=True,
              required=True)
@click.option("--datasets", "-d",
              type=click.Choice(supported_datasets,
                                case_sensitive=True),
              multiple=True,
              required=True)
@click.option("--test-run", "-t",
              is_flag=True,
              help="If the test-run flag is set, results will not be saved to csv")
@click.option("--num-workers", "-w",
               type=int,
               default=30,
               help="Number of cpu workers for data loading")
@click.option("--batch-size", "-b",
              type=int,
              default=16,
              help="Batch size during evaluation")
@click.option("--print-predictions", "-p",
              type=bool,
              default=True,
              help="Print predictions")
def main(models, datasets, *args, **kwargs):
    """Command-line entry point: evaluate the chosen models on the chosen datasets."""
    # Imported lazily so that importing this module stays cheap.
    from .model_evaluator import ModelEvaluator

    # "all" acts as a wildcard expanding to every registered model / dataset.
    if "all" in models:
        models = supported_models
    if "all" in datasets:
        datasets = supported_datasets

    ModelEvaluator()(models, datasets, *args, **kwargs)


================================================
FILE: modelvshuman/constants.py
================================================
#!/usr/bin/env python

import os
from os.path import join as pjoin

##################################################################
# DIRECTORIES
##################################################################

# Project root; read from the MODELVSHUMANDIR environment variable,
# falling back to the relative directory "model-vs-human".
PROJ_DIR = str(os.environ.get("MODELVSHUMANDIR", "model-vs-human"))
# NOTE(review): os.environ.get cannot return None here because a default is
# supplied; this assert only fires if the variable is literally the string "None".
assert (PROJ_DIR != "None"), "Please set the 'MODELVSHUMANDIR' environment variable as described in the README"
CODE_DIR = pjoin(PROJ_DIR, "modelvshuman")
DATASET_DIR = pjoin(PROJ_DIR, "datasets")
FIGURE_DIR = pjoin(PROJ_DIR, "figures")
RAW_DATA_DIR = pjoin(PROJ_DIR, "raw-data")
PERFORMANCES_DIR = pjoin(RAW_DATA_DIR, "performances")
REPORT_DIR = pjoin(PROJ_DIR, "latex-report/")
ASSETS_DIR = pjoin(PROJ_DIR, "assets/")
ICONS_DIR = pjoin(ASSETS_DIR, "icons/")

##################################################################
# CONSTANTS
##################################################################

IMG_SIZE = 224  # size of input images for most models

##################################################################
# DATASETS
##################################################################

# Parametric image distortions (one dataset per distortion type); the
# condition levels for each are defined in datasets/experiments.py.
NOISE_GENERALISATION_DATASETS = ["colour",
                                 "contrast",
                                 "high-pass",
                                 "low-pass",
                                 "phase-scrambling",
                                 "power-equalisation",
                                 "false-colour",
                                 "rotation",
                                 "eidolonI",
                                 "eidolonII",
                                 "eidolonIII",
                                 "uniform-noise"]

# Datasets probing texture vs. shape cues.
TEXTURE_SHAPE_DATASETS = ["original", "greyscale",
                          "texture", "edge", "silhouette",
                          "cue-conflict"]

# Datasets evaluated when the caller does not specify any.
DEFAULT_DATASETS = ["edge", "silhouette", "cue-conflict"] + \
                   NOISE_GENERALISATION_DATASETS + ["sketch", "stylized"]
##################################################################
# PLOT TYPES
##################################################################

# Maps each plot type to the datasets it is computed from.
PLOT_TYPE_TO_DATASET_MAPPING = {
    # default plot types:
    "shape-bias": ["cue-conflict"],
    "accuracy": NOISE_GENERALISATION_DATASETS,
    "nonparametric-benchmark-barplot": ["edge", "silhouette", "sketch", "stylized"],
    "benchmark-barplot": DEFAULT_DATASETS,
    "scatterplot": DEFAULT_DATASETS,
    "error-consistency-lineplot": NOISE_GENERALISATION_DATASETS,
    "error-consistency": ["cue-conflict", "edge", "silhouette", "sketch", "stylized"],
    # 'unusual' plot types:
    "entropy": NOISE_GENERALISATION_DATASETS,
    "confusion-matrix": DEFAULT_DATASETS,
    }

# All plot types except the 'unusual' ones are produced by default.
DEFAULT_PLOT_TYPES = list(PLOT_TYPE_TO_DATASET_MAPPING.keys())
DEFAULT_PLOT_TYPES.remove("entropy")
DEFAULT_PLOT_TYPES.remove("confusion-matrix")

##################################################################
# MODELS
##################################################################

# Groups of model names; each name must match a registered model identifier.

TORCHVISION_MODELS = ["alexnet",
                      "vgg11_bn",
                      "vgg13_bn",
                      "vgg16_bn",
                      "vgg19_bn",
                      "squeezenet1_0",
                      "squeezenet1_1",
                      "densenet121",
                      "densenet169",
                      "densenet201",
                      "inception_v3",
                      "resnet18",
                      "resnet34",
                      "resnet50",
                      "resnet101",
                      "resnet152",
                      "shufflenet_v2_x0_5",
                      "mobilenet_v2",
                      "resnext50_32x4d",
                      "resnext101_32x8d",
                      "wide_resnet50_2",
                      "wide_resnet101_2",
                      "mnasnet0_5",
                      "mnasnet1_0"]

BAGNET_MODELS = ["bagnet9", "bagnet17", "bagnet33"]

# ResNet-50 variants trained on Stylized-ImageNet (SIN) and/or ImageNet (IN).
SHAPENET_MODELS = ["resnet50_trained_on_SIN",
                   "resnet50_trained_on_SIN_and_IN",
                   "resnet50_trained_on_SIN_and_IN_then_finetuned_on_IN"]

SIMCLR_MODELS = ["simclr_resnet50x1", "simclr_resnet50x2", "simclr_resnet50x4"]

PYCONTRAST_MODELS = ["InsDis", "MoCo", "PIRL", "MoCoV2", "InfoMin"]

SELFSUPERVISED_MODELS = SIMCLR_MODELS + PYCONTRAST_MODELS

EFFICIENTNET_MODELS = ["efficientnet_b0", "noisy_student"]

# Adversarially trained ResNet-50s with varying L2 perturbation budgets.
ADV_ROBUST_MODELS = ["resnet50_l2_eps0", "resnet50_l2_eps0_5",
                     "resnet50_l2_eps1", "resnet50_l2_eps3",
                     "resnet50_l2_eps5"]

VISION_TRANSFORMER_MODELS = ["vit_small_patch16_224", "vit_base_patch16_224",
                             "vit_large_patch16_224"]

BIT_M_MODELS = ["BiTM_resnetv2_50x1", "BiTM_resnetv2_50x3", "BiTM_resnetv2_101x1",
                "BiTM_resnetv2_101x3", "BiTM_resnetv2_152x2", "BiTM_resnetv2_152x4"]

SWAG_MODELS = ["swag_regnety_16gf_in1k", "swag_regnety_32gf_in1k", "swag_regnety_128gf_in1k",
               "swag_vit_b16_in1k", "swag_vit_l16_in1k", "swag_vit_h14_in1k"]


================================================
FILE: modelvshuman/datasets/__init__.py
================================================
from .imagenet import imagenet_validation
from .sketch import sketch
from .stylized import stylized
from .texture_shape import *
from .noise_generalisation import *

from .dataset_converters import ToTensorflow

from .create_dataset import create_dataset
from .create_dataset import create_experiment

from .registry import list_datasets


================================================
FILE: modelvshuman/datasets/base.py
================================================
#!/usr/bin/env python3

import os
from os.path import join as pjoin


class Dataset(object):
    """Base Dataset class

    Wraps a directory of stimuli together with the metadata needed to
    evaluate models on it, plus a lazily constructed data loader.

    Attributes:
        name (str): name of the dataset
        params (object): Dataclass object with attributes path, image_size,
                        metrics, decision_mapping, info_mapping, experiments
                        and contains_sessions
        loader (pytorch loader): Data loader (built on first access)
        args (tuple): Other positional arguments
    """

    def __init__(self,
                 name,
                 params,
                 loader,
                 *args,
                 **kwargs):

        self.name = name
        self.image_size = params.image_size
        self.decision_mapping = params.decision_mapping
        self.info_mapping = params.info_mapping
        self.experiments = params.experiments
        self.metrics = params.metrics
        self.contains_sessions = params.contains_sessions
        self.args = args
        self.kwargs = kwargs

        # Inputs only need resizing when they deviate from the standard 224px.
        resize = params.image_size != 224

        # Session-structured datasets keep the DNN stimuli in a "dnn/" subdir.
        if self.contains_sessions:
            self.path = pjoin(params.path, "dnn/")
        else:
            self.path = params.path
        assert os.path.exists(self.path), f"dataset {self.name} path not found: " + self.path

        # Sanity-check the directory layout against the session flag.
        if self.contains_sessions:
            assert all(f.startswith("session-") for f in os.listdir(self.path))
        else:
            assert not any(f.startswith("session-") for f in os.listdir(self.path))

        # Propagate the dataset name to each attached experiment.
        if self.experiments:
            for e in self.experiments:
                e.name = self.name

        self._loader = None  # lazily built on first access of self.loader
        self._loader_callback = lambda: loader()(self.path, resize=resize,
                                                 batch_size=self.kwargs["batch_size"],
                                                 num_workers=self.kwargs["num_workers"],
                                                 info_mapping=self.info_mapping)

    @property
    def loader(self):
        # Build the data loader on first access and cache it afterwards.
        if self._loader is None:
            self._loader = self._loader_callback()
        return self._loader

    @loader.setter
    def loader(self, new_loader):
        self._loader = new_loader


================================================
FILE: modelvshuman/datasets/create_dataset.py
================================================
#!/usr/bin/env python3

"""
Create dataset and experiments.
A dataset is a directory with subdirectories, one subdir per class.
An experiment is a directory with subdirectories, one subdir per participant.
"""

import os
from os.path import join as pjoin
from os import listdir as ld
import numpy as np
import shutil
import sys
from PIL import Image
import numpy as np
import math
from torchvision import transforms

from ..helper import human_categories as hc
from .. import constants as consts


def resize_crop_image(input_file,
                      resize_size,
                      crop_size):
    """Replace input_file with a resized and center-cropped version (png).

    The original file is deleted and the result is saved next to it with a
    ".png" extension (a ".JPEG" suffix, if present, is replaced).
    """
    t = transforms.Compose([transforms.Resize(resize_size),
                            transforms.CenterCrop(crop_size)])
    # Transform inside a context manager so the file handle is closed before
    # the original file is removed (removal of an open file fails on Windows).
    with Image.open(input_file) as img:
        new_img = t(img)
    os.remove(input_file)
    new_img.save(input_file.replace(".JPEG", ".png"), 'png')


def create_dataset(original_dataset_path,
                   target_dataset_path,
                   rng,
                   min_num_imgs_per_class,
                   max_num_imgs_per_class,
                   target_resize_size,
                   target_crop_size):
    """Create a balanced dataset from a larger (potentially unbalanced) dataset.

    parameters:
    - original_dataset_path: directory with one subdirectory per WNID class
    - target_dataset_path: output directory (must not exist yet)
    - rng: numpy random generator used to sample images per category
    - min_num_imgs_per_class: minimum images required per 16-class category
    - max_num_imgs_per_class: cap on images sampled per 16-class category
    - target_resize_size / target_crop_size: passed to resize_crop_image
    """

    categories = hc.HumanCategories()

    # Per 16-class category: image count and list of source image paths.
    class_count_dict = dict()
    image_path_dict = dict()

    for human_category in sorted(hc.get_human_object_recognition_categories()):
        class_count_dict[human_category] = 0
        image_path_dict[human_category] = list()

    for c in sorted(os.listdir(original_dataset_path)):
        human_category = categories.get_human_category_from_WNID(c)
        if human_category is not None:
            class_count_dict[human_category] += len(os.listdir(pjoin(original_dataset_path,
                                                                     c)))
            for image_name in sorted(os.listdir(pjoin(original_dataset_path, c))):
                image_path_dict[human_category].append(pjoin(original_dataset_path,
                                                             c, image_name))

    # Statistics over the raw (WNID-level) class directories.
    count = 0
    maximum = 0
    minimum = np.inf  # np.Inf was removed in NumPy 2.0
    for c in sorted(os.listdir(original_dataset_path)):
        num = len(os.listdir(pjoin(original_dataset_path, c)))
        count += num
        maximum = max(maximum, num)
        minimum = min(minimum, num)

    # Smallest category size determines how balanced we can be.
    min_16_classes = min(class_count_dict.values())

    print("Total image count: "+str(count))
    print("Max #images per class: "+str(maximum))
    print("Min #images per class: "+str(minimum))
    print("Min #images within 16 classes: "+str(min_16_classes))
    print(class_count_dict)

    assert min_16_classes >= min_num_imgs_per_class, "not enough images"
    num_imgs_per_target_class = min(max_num_imgs_per_class, min_16_classes)

    if not os.path.exists(target_dataset_path):
        print("Creating directory "+target_dataset_path)
        os.makedirs(target_dataset_path)
    else:
        raise OSError("target dataset already exists: "+target_dataset_path)

    for human_category in sorted(hc.get_human_object_recognition_categories()):
        print("Creating category "+human_category)
        category_dir = pjoin(target_dataset_path, human_category)
        if not os.path.exists(category_dir):
            os.makedirs(category_dir)

        num_images = class_count_dict[human_category]
        assert num_images >= min_16_classes, "not enough images found"

        # Sample without replacement so every target image is unique.
        choice = rng.choice(num_images, num_imgs_per_target_class, replace=False)

        assert len(choice) <= len(image_path_dict[human_category])
        assert len(choice) == num_imgs_per_target_class

        for image_index in choice:
            # 1-based index, zero-padded to at least four digits.
            image_index_str = str(image_index + 1).zfill(4)

            image_path = image_path_dict[human_category][image_index]
            target_image_path = pjoin(target_dataset_path, human_category,
                                      human_category+"-"+image_index_str+"-"+image_path.split("/")[-1].replace("_", "-"))
            shutil.copyfile(image_path, target_image_path)
            resize_crop_image(target_image_path, target_resize_size,
                              target_crop_size)
            

def create_experiment(expt_name,
                      expt_abbreviation,
                      expt_source_dir,
                      expt_target_dir,
                      only_dnn=True,
                      num_subjects=1,
                      rng=None):
    """Create human / CNN experiment.

    parameters:
    - only_dnn: boolean indicating whether this is a DNN experiment
              or not (if not, a human experiment will be created and an
              rng is required for shuffling the presentation order.)
    """

    if not only_dnn:
        assert rng is not None, "Please specify random number generator (rng)!"

    assert("_" not in expt_name), "no '_' in experiment name!"
    assert(os.path.exists(expt_source_dir)), "directory "+expt_source_dir+" does not exist."

    # Subject 0 is the DNN; subjects 1..num_subjects are human observers.
    for subject_index in range(0, num_subjects+1):

        if subject_index == 0:
            subject_abbreviation = "dnn"
            subject_name = "dnn"
        else:
            subject_abbreviation = "s"+get_leading_zeros(subject_index, 2)
            subject_name = "subject-"+get_leading_zeros(subject_index, 2)
        print("Creating experiment for subject: '"+subject_name+"'")

        target_dir = pjoin(expt_target_dir, expt_name,
                           subject_name, "session-1")

        if os.path.exists(target_dir):
            print("Error: target directory "+target_dir+" does already exist.")
            sys.exit(1)
        else:
            os.makedirs(target_dir)

        # Collect all stimuli, sorted by category then file name.
        img_list = []
        for c in sorted(hc.get_human_object_recognition_categories()):
            for x in sorted(ld(pjoin(expt_source_dir, c))):
                img_list.append(pjoin(expt_source_dir, c, x))

        # Humans see a shuffled order; the DNN keeps the canonical order.
        order = np.arange(len(img_list))
        if subject_index != 0:
            rng.shuffle(order)

        # NOTE: loop variable renamed from 'i' to avoid shadowing the outer
        # subject loop variable.
        for position, img_index in enumerate(order):

            input_file = img_list[img_index]
            imgname = input_file.split("/")[-1]
            correct_category = input_file.split("/")[-2]
            condition = "0"
            target_image_path = pjoin(target_dir,
                                      (get_leading_zeros(position+1)+"_"+
                                       expt_abbreviation+"_"+
                                       subject_abbreviation+"_"+
                                       condition+"_"+
                                       correct_category+"_"+
                                       "00_"+
                                       imgname))

            shutil.copyfile(input_file, target_image_path)


def get_leading_zeros(num, length=4):
    """Zero-pad num to `length` characters, keeping only the last `length`."""
    padded = "0" * length + str(num)
    return padded[-length:]


================================================
FILE: modelvshuman/datasets/dataloaders.py
================================================
import torch
from torchvision import transforms
import torchvision.datasets as datasets
from . import info_mappings


class ImageFolderWithPaths(datasets.ImageFolder):
    """Custom dataset that includes image file paths. Extends
    torchvision.datasets.ImageFolder

    Adapted from:
    https://gist.github.com/andrewjong/6b02ff237533b3b2c554701fb53d5c4d
    """

    def __init__(self, *args, **kwargs):
        # Pop our custom kwarg before delegating, since ImageFolder does not
        # accept it; fall back to the ImageNet mapping when absent or None
        # (previously an explicit info_mapping=None was stored as-is and
        # crashed later in __getitem__).
        self.info_mapping = kwargs.pop("info_mapping", None)
        if self.info_mapping is None:
            self.info_mapping = info_mappings.ImageNetInfoMapping()

        super(ImageFolderWithPaths, self).__init__(*args, **kwargs)

    def __getitem__(self, index):
        """Return (sample, mapped_target, path) for the given index."""
        # this is what ImageFolder normally returns
        (sample, target) = super(ImageFolderWithPaths, self).__getitem__(index)

        # the image file path
        path = self.imgs[index][0]
        # derive the target label from the file path via the info mapping
        _, _, _, new_target = self.info_mapping(path)

        # tuple that includes the sample, the mapped target and the path
        return (sample, new_target, path)


class PytorchLoader(object):
    """Builds a torch DataLoader over an image-folder dataset."""

    def __call__(self, path, resize, batch_size, num_workers,
                 info_mapping=None):
        """
        Data loader for pytorch models
        :param path: image folder root (one subdirectory per class)
        :param resize: if True, resize to 256 and center-crop to 224
        :param batch_size: samples per batch
        :param num_workers: data-loading worker processes
        :param info_mapping: optional mapping from file path to target label
        :return: a torch.utils.data.DataLoader
        """
        # Standard ImageNet channel statistics.
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])

        steps = []
        if resize:
            steps += [transforms.Resize(256), transforms.CenterCrop(224)]
        steps += [transforms.ToTensor(), normalize]

        dataset = ImageFolderWithPaths(path, transforms.Compose(steps),
                                       info_mapping=info_mapping)
        return torch.utils.data.DataLoader(dataset,
                                           batch_size=batch_size,
                                           shuffle=False,
                                           num_workers=num_workers,
                                           pin_memory=True)


================================================
FILE: modelvshuman/datasets/dataset_converters.py
================================================
import tensorflow as tf
import torch
import numpy as np


class ToTensorflow(object):
    """Wraps a PyTorch data loader so it yields TensorFlow-ready batches."""

    def __init__(self, pytorch_loader):
        self.pytorch_loader = pytorch_loader
        # ImageNet normalisation constants, used to undo the torch transform.
        self.mean = np.array([0.485, 0.456, 0.406])
        self.std = np.array([0.229, 0.224, 0.225])

    def convert(self, x):
        """Convert a torch tensor to numpy; pass anything else through."""
        return x.numpy() if isinstance(x, torch.Tensor) else x

    def __iter__(self):
        for images, *rest in self.pytorch_loader:
            # NCHW -> NHWC (tensorflow uses channel-last format), then undo
            # the normalisation before handing the batch to TF.
            arr = images.numpy().transpose([0, 2, 3, 1])
            arr *= self.std
            arr += self.mean
            converted = (self.convert(item) for item in rest)
            yield (tf.convert_to_tensor(arr), *converted)


================================================
FILE: modelvshuman/datasets/decision_mappings.py
================================================
#!/usr/bin/env python

import numpy as np
from abc import ABC, abstractmethod

from ..helper import human_categories as hc
from ..helper import wordnet_functions as wnf


class DecisionMapping(ABC):
    """Abstract base: maps output probabilities to an ordering of class decisions."""

    def check_input(self, probabilities):
        """Validate that probabilities is a numpy array with values in [0, 1]."""
        # isinstance (rather than an exact type check) also accepts
        # np.ndarray subclasses, which behave identically for our purposes.
        assert isinstance(probabilities, np.ndarray)
        assert (probabilities >= 0.0).all() and (probabilities <= 1.0).all()

    @abstractmethod
    def __call__(self, probabilities):
        pass


class ImageNetProbabilitiesTo1000ClassesMapping(DecisionMapping):
    """Return the WNIDs sorted by probabilities."""

    def __init__(self):
        # All ILSVRC-2012 WNIDs in canonical index order.
        self.categories = wnf.get_ilsvrc2012_WNIDs()

    def __call__(self, probabilities):
        self.check_input(probabilities)
        # Ascending argsort, then reversed along the last axis => descending.
        ascending = np.argsort(probabilities)
        descending = np.flip(ascending, axis=-1)
        return np.take(self.categories, descending, axis=-1)

class ImageNetProbabilitiesTo16ClassesMapping(DecisionMapping):
    """Return the 16 class categories sorted by probabilities"""

    def __init__(self, aggregation_function=None):
        # Default aggregation: mean over the ImageNet classes of a category.
        self.aggregation_function = np.mean if aggregation_function is None else aggregation_function
        self.categories = hc.get_human_object_recognition_categories()

    def __call__(self, probabilities):
        self.check_input(probabilities)

        helper = hc.HumanCategories()
        # Collapse the fine-grained probabilities into one value per category.
        per_category = [
            self.aggregation_function(
                np.take(probabilities,
                        helper.get_imagenet_indices_for_category(category),
                        axis=-1),
                axis=-1)
            for category in self.categories
        ]
        stacked = np.transpose(per_category)
        order = np.flip(np.argsort(stacked, axis=-1), axis=-1)
        return np.take(self.categories, order, axis=-1)


================================================
FILE: modelvshuman/datasets/experiments.py
================================================
from dataclasses import dataclass, field
from typing import List


@dataclass
class Experiment:
    """
    Experiment parameters
    """
    # Condition labels as shown on plot axes (same length as data_conditions).
    plotting_conditions: List = field(default_factory=list)
    # Axis label describing what varies across conditions.
    xlabel: str = 'Condition'
    # Condition identifiers as they appear in the raw data.
    data_conditions: List = field(default_factory=list)

    def __post_init__(self):
        # Every data condition needs exactly one plotting label.
        assert len(self.plotting_conditions) == len(self.data_conditions), \
            "Length of plotting conditions " + str(self.plotting_conditions) + \
            " and data conditions " + str(self.data_conditions) + " must be same"


# One Experiment definition per noise-generalisation dataset.
# Raw strings are used wherever a label contains a LaTeX backslash command,
# since e.g. "\c" and "\m" are invalid escape sequences in regular strings
# (SyntaxWarning in current Python, slated to become an error).

colour_experiment = Experiment(data_conditions=["cr", "bw"],
                               plotting_conditions=["colour", "greyscale"],
                               xlabel="Colour")

contrast_experiment = Experiment(data_conditions=["c100", "c50", "c30", "c15", "c10", "c05", "c03", "c01"],
                                 plotting_conditions=["100", "50", "30", "15", "10", "5", "3", "1"],
                                 xlabel="Contrast in percent")

high_pass_experiment = Experiment(data_conditions=["inf", "3.0", "1.5", "1.0", "0.7", "0.55", "0.45", "0.4"],
                                  plotting_conditions=["inf", "3.0", "1.5", "1.0", ".7", ".55", ".45", ".4"],
                                  xlabel="Filter standard deviation")

low_pass_experiment = Experiment(data_conditions=["0", "1", "3", "5", "7", "10", "15", "40"],
                                 plotting_conditions=["0", "1", "3", "5", "7", "10", "15", "40"],
                                 xlabel="Filter standard deviation")

phase_scrambling_experiment = Experiment(data_conditions=["0", "30", "60", "90", "120", "150", "180"],
                                         plotting_conditions=["0", "30", "60", "90", "120", "150", "180"],
                                         xlabel=r"Phase noise width [$^\circ$]")

power_equalisation_experiment = Experiment(data_conditions=["0", "pow"],
                                           plotting_conditions=["original", "equalised"],
                                           xlabel="Power spectrum")

false_colour_experiment = Experiment(data_conditions=["True", "False"],
                                     plotting_conditions=["true", "opponent"],
                                     xlabel="Colour")

rotation_experiment = Experiment(data_conditions=["0", "90", "180", "270"],
                                 plotting_conditions=["0", "90", "180", "270"],
                                 xlabel=r"Rotation angle [$^\circ$]")

_eidolon_plotting_conditions = ["0", "1", "2", "3", "4", "5", "6", "7"]
_eidolon_xlabel = r"$\mathregular{{Log}_2}$ of 'reach' parameter"

eidolonI_experiment = Experiment(data_conditions=["1-10-10", "2-10-10", "4-10-10", "8-10-10",
                                                  "16-10-10", "32-10-10", "64-10-10", "128-10-10"],
                                 plotting_conditions=_eidolon_plotting_conditions.copy(),
                                 xlabel=_eidolon_xlabel)

eidolonII_experiment = Experiment(data_conditions=["1-3-10", "2-3-10", "4-3-10", "8-3-10",
                                                   "16-3-10", "32-3-10", "64-3-10", "128-3-10"],
                                  plotting_conditions=_eidolon_plotting_conditions.copy(),
                                  xlabel=_eidolon_xlabel)

eidolonIII_experiment = Experiment(data_conditions=["1-0-10", "2-0-10", "4-0-10", "8-0-10",
                                                    "16-0-10", "32-0-10", "64-0-10", "128-0-10"],
                                   plotting_conditions=_eidolon_plotting_conditions.copy(),
                                   xlabel=_eidolon_xlabel)

uniform_noise_experiment = Experiment(data_conditions=["0.0", "0.03", "0.05", "0.1", "0.2", "0.35", "0.6", "0.9"],
                                      plotting_conditions=["0.0", ".03", ".05", ".1", ".2", ".35", ".6", ".9"],
                                      xlabel="Uniform noise width")


@dataclass
class DatasetExperiments:
    """Pairs a dataset name with the Experiment definitions that apply to it."""
    name: str
    # Each entry is an Experiment instance. The previous annotation
    # `[Experiment]` was a one-element list literal, not a valid type
    # annotation; a plain `list` is used instead.
    experiments: list


def get_experiments(dataset_names):
    """Look up the Experiment definition for each dataset name.

    For every name in `dataset_names`, a module-level variable called
    "<name with '-' replaced by '_'>_experiment" is looked up; if present,
    it is wrapped in a one-element experiment list, otherwise the dataset
    gets an empty experiment list.

    Returns a list of DatasetExperiments, one per input name.
    """
    datasets = []
    for name in dataset_names:
        # direct globals() lookup instead of eval() on a constructed string
        experiment_variable = name.replace("-", "_") + "_experiment"
        experiment = globals().get(experiment_variable)
        if experiment is not None:
            # NOTE(review): this mutates the shared module-level Experiment
            # object, as the original code did.
            experiment.name = name
            datasets.append(DatasetExperiments(name=name, experiments=[experiment]))
        else:
            datasets.append(DatasetExperiments(name=name, experiments=[]))
    return datasets


================================================
FILE: modelvshuman/datasets/imagenet.py
================================================
from dataclasses import dataclass, field
from os.path import join as pjoin
from typing import List

from . import decision_mappings, info_mappings
from .base import Dataset
from .dataloaders import PytorchLoader
from .registry import register_dataset
from .. import constants as c
from ..evaluation import metrics as m


@dataclass
class ImageNetParams:
    """Parameter bundle for ImageNet-style datasets.

    Collects image size, evaluation metrics and the probability-to-class
    decision/info mappings consumed by the Dataset loader.
    """
    path: str
    image_size: int = 224
    metrics: list = field(default_factory=lambda: [m.Accuracy(topk=1), m.Accuracy(topk=5)])
    # default_factory so every instance gets its own mapping object; the
    # previous plain defaults were instantiated once at class-definition time
    # and shared across all ImageNetParams instances.
    decision_mapping: object = field(
        default_factory=decision_mappings.ImageNetProbabilitiesTo1000ClassesMapping)
    info_mapping: object = field(default_factory=info_mappings.ImageNetInfoMapping)
    experiments: List = field(default_factory=list)
    contains_sessions: bool = False


@register_dataset(name='imagenet_validation')
def imagenet_validation(*args, **kwargs):
    """Registered entry point for the ImageNet validation dataset (256px)."""
    dataset_path = pjoin(c.DATASET_DIR, "imagenet_validation")
    params = ImageNetParams(image_size=256, path=dataset_path)
    return Dataset(name="imagenet_validation", params=params,
                   loader=PytorchLoader, *args, **kwargs)


================================================
FILE: modelvshuman/datasets/info_mappings.py
================================================
from abc import ABC


class ImagePathToInformationMapping(ABC):
    """Base class for mappings from an image path to its trial information.

    Subclasses parse a full image path and return a 4-tuple:
    (session_name, img_name, condition, category).
    """

    def __init__(self):
        pass

    def __call__(self, full_path):
        # Implemented by subclasses; returns
        # (session_name, img_name, condition, category).
        pass


class ImageNetInfoMapping(ImagePathToInformationMapping):
    """
        For ImageNet-like directory structures without sessions/conditions:
        .../{category}/{img_name}
    """

    def __call__(self, full_path):
        path_parts = full_path.split("/")
        # the category folder is the immediate parent of the image file
        category = path_parts[-2]
        img_name = path_parts[-1]
        # no sessions/conditions in this layout, so both are fixed
        return "session-1", img_name, "NaN", category


class ImageNetCInfoMapping(ImagePathToInformationMapping):
    """
        For the ImageNet-C Dataset with path structure:
        ...{corruption function}/{corruption severity}/{category}/{img_name}
    """

    def __call__(self, full_path):
        path_parts = full_path.split("/")
        # the condition combines corruption name and its severity level
        condition = "{}-{}".format(path_parts[-4], path_parts[-3])
        return "session-1", path_parts[-1], condition, path_parts[-2]


class InfoMappingWithSessions(ImagePathToInformationMapping):
    """
        Directory/filename structure:
        .../{session_name}/{something}_{something}_{something}_{condition}_{category}_{img_name}
    """

    def __call__(self, full_path):
        path_parts = full_path.split("/")
        session_name = path_parts[-2]
        img_name = path_parts[-1]
        # condition and category are the 4th and 5th underscore-separated
        # fields of the file name
        name_fields = img_name.split("_")
        return session_name, img_name, name_fields[3], name_fields[4]


================================================
FILE: modelvshuman/datasets/noise_generalisation.py
================================================
from dataclasses import dataclass, field
from os.path import join as pjoin
from typing import List

from .registry import register_dataset
from .. import constants as c
from . import decision_mappings, info_mappings
from .dataloaders import PytorchLoader
from ..evaluation import metrics as m

from .base import Dataset
from .experiments import *

__all__ = ["colour", "contrast", "high_pass", "low_pass",
           "phase_scrambling", "power_equalisation",
           "false_colour", "rotation", "eidolonI",
           "eidolonII", "eidolonIII", "uniform_noise"]


@dataclass
class NoiseGeneralisationParams:
    """Parameter bundle shared by all noise-generalisation datasets.

    These datasets use the 16-class decision mapping and session-structured
    file names; `path` is filled in later by `_get_dataset`.
    """
    path: str = ""
    experiments: List = field(default_factory=list)
    image_size: int = 224
    metrics: list = field(default_factory=lambda: [m.Accuracy(topk=1)])
    # default_factory so every instance gets its own mapping object; the
    # previous plain defaults were instantiated once at class-definition time
    # and shared across all NoiseGeneralisationParams instances.
    decision_mapping: object = field(
        default_factory=decision_mappings.ImageNetProbabilitiesTo16ClassesMapping)
    info_mapping: object = field(default_factory=info_mappings.InfoMappingWithSessions)
    contains_sessions: bool = True


def _get_dataset(name, params, *args, **kwargs):
    """Resolve the dataset directory under DATASET_DIR and build the Dataset."""
    assert params is not None, "Dataset params are missing"
    # the on-disk location is derived from the dataset name
    params.path = pjoin(c.DATASET_DIR, name)
    return Dataset(name=name, params=params, loader=PytorchLoader, *args, **kwargs)


@register_dataset(name="colour")
def colour(*args, **kwargs):
    """Registered entry point for the 'colour' dataset."""
    params = NoiseGeneralisationParams(experiments=[colour_experiment])
    return _get_dataset(name="colour", params=params, *args, **kwargs)


@register_dataset(name="contrast")
def contrast(*args, **kwargs):
    """Registered entry point for the 'contrast' dataset."""
    params = NoiseGeneralisationParams(experiments=[contrast_experiment])
    return _get_dataset(name="contrast", params=params, *args, **kwargs)


@register_dataset(name="high-pass")
def high_pass(*args, **kwargs):
    """Registered entry point for the 'high-pass' dataset."""
    params = NoiseGeneralisationParams(experiments=[high_pass_experiment])
    return _get_dataset(name="high-pass", params=params, *args, **kwargs)


@register_dataset(name="low-pass")
def low_pass(*args, **kwargs):
    """Registered entry point for the 'low-pass' dataset."""
    params = NoiseGeneralisationParams(experiments=[low_pass_experiment])
    return _get_dataset(name="low-pass", params=params, *args, **kwargs)


@register_dataset(name="phase-scrambling")
def phase_scrambling(*args, **kwargs):
    """Registered entry point for the 'phase-scrambling' dataset."""
    params = NoiseGeneralisationParams(experiments=[phase_scrambling_experiment])
    return _get_dataset(name="phase-scrambling", params=params, *args, **kwargs)


@register_dataset(name="power-equalisation")
def power_equalisation(*args, **kwargs):
    """Registered entry point for the 'power-equalisation' dataset."""
    params = NoiseGeneralisationParams(experiments=[power_equalisation_experiment])
    return _get_dataset(name="power-equalisation", params=params, *args, **kwargs)


@register_dataset(name="false-colour")
def false_colour(*args, **kwargs):
    """Registered entry point for the 'false-colour' dataset."""
    params = NoiseGeneralisationParams(experiments=[false_colour_experiment])
    return _get_dataset(name="false-colour", params=params, *args, **kwargs)


@register_dataset(name="rotation")
def rotation(*args, **kwargs):
    """Registered entry point for the 'rotation' dataset."""
    params = NoiseGeneralisationParams(experiments=[rotation_experiment])
    return _get_dataset(name="rotation", params=params, *args, **kwargs)


@register_dataset(name="eidolonI")
def eidolonI(*args, **kwargs):
    """Registered entry point for the 'eidolonI' dataset."""
    params = NoiseGeneralisationParams(experiments=[eidolonI_experiment])
    return _get_dataset(name="eidolonI", params=params, *args, **kwargs)


@register_dataset(name="eidolonII")
def eidolonII(*args, **kwargs):
    """Registered entry point for the 'eidolonII' dataset."""
    params = NoiseGeneralisationParams(experiments=[eidolonII_experiment])
    return _get_dataset(name="eidolonII", params=params, *args, **kwargs)


@register_dataset(name="eidolonIII")
def eidolonIII(*args, **kwargs):
    """Registered entry point for the 'eidolonIII' dataset."""
    params = NoiseGeneralisationParams(experiments=[eidolonIII_experiment])
    return _get_dataset(name="eidolonIII", params=params, *args, **kwargs)


@register_dataset(name="uniform-noise")
def uniform_noise(*args, **kwargs):
    """Registered entry point for the 'uniform-noise' dataset."""
    params = NoiseGeneralisationParams(experiments=[uniform_noise_experiment])
    return _get_dataset(name="uniform-noise", params=params, *args, **kwargs)


================================================
FILE: modelvshuman/datasets/registry.py
================================================
from collections import defaultdict

_dataset_registry = {}  # mapping of dataset names to entrypoint fns


def register_dataset(name):
    """Decorator factory that records a dataset entrypoint under `name`.

    NOTE(review): the registry stores the entrypoint function's __name__
    (a string), not the function object itself, although the module-level
    comment says "entrypoint fns" — confirm which is intended.
    """
    def inner_decorator(fn):
        # map the public dataset name to the entrypoint's function name
        _dataset_registry[name] = fn.__name__
        return fn
    return inner_decorator


def list_datasets():
    """Return the dataset registry (mapping of dataset names to entrypoint names).

    NOTE(review): despite the function name, this returns the full dict, not a
    sorted list of names; callers wanting sorted names should use
    sorted(list_datasets()).
    """
    return _dataset_registry


================================================
FILE: modelvshuman/datasets/sketch.py
================================================
from os.path import join as pjoin

from .base import Dataset
from .dataloaders import PytorchLoader
from .imagenet import ImageNetParams
from .registry import register_dataset
from .. import constants as c
from . import info_mappings, decision_mappings


@register_dataset(name='sketch')
def sketch(*args, **kwargs):
    """Registered entry point for the session-structured 'sketch' dataset."""
    decision_mapping = decision_mappings.ImageNetProbabilitiesTo16ClassesMapping()
    info_mapping = info_mappings.InfoMappingWithSessions()
    params = ImageNetParams(path=pjoin(c.DATASET_DIR, "sketch"),
                            decision_mapping=decision_mapping,
                            info_mapping=info_mapping,
                            contains_sessions=True)
    return Dataset(name="sketch", params=params,
                   loader=PytorchLoader, *args, **kwargs)


================================================
FILE: modelvshuman/datasets/stylized.py
================================================
from os.path import join as pjoin

from .base import Dataset
from .dataloaders import PytorchLoader
from .imagenet import ImageNetParams
from .registry import register_dataset
from .. import constants as c
from . import info_mappings, decision_mappings


@register_dataset(name='stylized')
def stylized(*args, **kwargs):
    """Registered entry point for the session-structured 'stylized' dataset."""
    decision_mapping = decision_mappings.ImageNetProbabilitiesTo16ClassesMapping()
    info_mapping = info_mappings.InfoMappingWithSessions()
    params = ImageNetParams(path=pjoin(c.DATASET_DIR, "stylized"),
                            decision_mapping=decision_mapping,
                            info_mapping=info_mapping,
                            contains_sessions=True)
    return Dataset(name="stylized", params=params,
                   loader=PytorchLoader, *args, **kwargs)


================================================
FILE: modelvshuman/datasets/texture_shape.py
================================================
from dataclasses import dataclass, field
from os.path import join as pjoin
from typing import List

from . import decision_mappings, info_mappings
from .base import Dataset
from .dataloaders import PytorchLoader
from .registry import register_dataset
from .. import constants as c
from ..evaluation import metrics as m

__all__ = ["original", "greyscale", "texture", "edge", "silhouette",
           "cue_conflict"]


@dataclass
class TextureShapeParams:
    """Parameter bundle shared by the texture-shape datasets.

    These datasets use the 16-class decision mapping and a plain
    category/image directory layout without sessions.
    """
    path: str
    image_size: int = 224
    metrics: list = field(default_factory=lambda: [m.Accuracy(topk=1)])
    # default_factory so every instance gets its own mapping object; the
    # previous plain defaults were instantiated once at class-definition time
    # and shared across all TextureShapeParams instances.
    decision_mapping: object = field(
        default_factory=decision_mappings.ImageNetProbabilitiesTo16ClassesMapping)
    info_mapping: object = field(default_factory=info_mappings.ImageNetInfoMapping)
    experiments: List = field(default_factory=list)
    contains_sessions: bool = False


def _get_dataset(name, *args, **kwargs):
    """Build a texture-shape Dataset whose images live in DATASET_DIR/<name>."""
    dataset_path = pjoin(c.DATASET_DIR, name)
    params = TextureShapeParams(path=dataset_path)
    return Dataset(name=name, params=params, loader=PytorchLoader, *args, **kwargs)


@register_dataset(name="original")
def original(*args, **kwargs):
    """Registered entry point for the 'original' dataset."""
    dataset = _get_dataset(name="original", *args, **kwargs)
    return dataset


@register_dataset(name="greyscale")
def greyscale(*args, **kwargs):
    """Registered entry point for the 'greyscale' dataset."""
    dataset = _get_dataset(name="greyscale", *args, **kwargs)
    return dataset


@register_dataset(name="texture")
def texture(*args, **kwargs):
    """Registered entry point for the 'texture' dataset."""
    dataset = _get_dataset(name="texture", *args, **kwargs)
    return dataset


@register_dataset(name="edge")
def edge(*args, **kwargs):
    """Registered entry point for the 'edge' dataset."""
    dataset = _get_dataset(name="edge", *args, **kwargs)
    return dataset


@register_dataset(name="silhouette")
def silhouette(*args, **kwargs):
    """Registered entry point for the 'silhouette' dataset."""
    dataset = _get_dataset("silhouette", *args, **kwargs)
    return dataset


@register_dataset(name="cue-conflict")
def cue_conflict(*args, **kwargs):
    """Registered entry point for the 'cue-conflict' dataset."""
    dataset = _get_dataset("cue-conflict", *args, **kwargs)
    return dataset


================================================
FILE: modelvshuman/evaluation/__init__.py
================================================


================================================
FILE: modelvshuman/evaluation/evaluate.py
================================================
"""
Generic evaluation functionality: evaluate on several datasets.
"""

import csv
import os
import shutil
import numpy as np
from math import isclose
from os.path import join as pjoin

from .. import constants as c

IMAGENET_LABEL_FILE = pjoin(c.CODE_DIR, "evaluation", "imagenet_labels.txt")

def print_performance_to_csv(model_name, dataset_name,
                             performance, metric_name,
                             data_parent_dir=c.PERFORMANCES_DIR):
    """Write one (model, dataset, metric, performance) row to <model>.csv.

    If the file already contains a row for the same dataset and metric, that
    row is overwritten in place; otherwise the new row is appended at the end.

    The per-model CSV is assumed to only ever hold rows for `model_name`
    (enforced by an assert below).
    """
    os.makedirs(data_parent_dir, exist_ok=True)
    csv_file_path = pjoin(data_parent_dir, model_name + ".csv")
    newrow = [model_name, dataset_name,
              metric_name, performance]

    if not os.path.exists(csv_file_path):
        # fresh file: header row plus the single new row
        with open(csv_file_path, "w", newline='') as f:
            writer = csv.writer(f)
            writer.writerow(["subj", "dataset_name",
                             "metric_name", "performance"])
            writer.writerow(newrow)
    else:
        # Rewrite the file via a temp file, replacing a matching row or
        # appending the new one at the end. newline='' on BOTH handles is
        # required by the csv module (the original omitted it here, producing
        # blank lines on Windows).
        temp_file_path = csv_file_path.replace(".csv", "_temp.csv")
        with open(csv_file_path, 'r', newline='') as f, \
                open(temp_file_path, 'w', newline='') as t:
            reader = csv.reader(f)
            writer = csv.writer(t)

            has_overwritten_existing_row = False
            for i, row in enumerate(reader):
                tmprow = row
                if i >= 1:  # skip header row
                    assert row[0] == model_name
                    if row[1] == dataset_name and row[2] == metric_name:
                        tmprow = newrow
                        has_overwritten_existing_row = True
                writer.writerow(tmprow)
            if not has_overwritten_existing_row:
                writer.writerow(newrow)
        shutil.move(temp_file_path, csv_file_path)


def print_predictions_to_console(softmax_output, top_n=5,
                                 labels_path=IMAGENET_LABEL_FILE):
    """For each vector in the output batch: print predictions.

    For every softmax vector of shape [1000] in the output batch (whose
    values must already sum to 1), the top_n (e.g. top 5) entries are
    printed to the console together with their class labels.

    This can be used to check predictions for individual images.
    """

    assert type(softmax_output) is np.ndarray
    assert len(softmax_output.shape) == 2, \
        "len(softmax_output) needs to be 2 instead of " + str(len(softmax_output.shape))

    # read labels once; `with` ensures the file handle is closed
    # (the original code leaked the open file object)
    with open(labels_path) as labels_file:
        labels = labels_file.readlines()

    for z in range(softmax_output.shape[0]):
        print()
        print("Predictions for image no. " + str(z + 1))

        softmax_array = softmax_output[z, :]

        assert isclose(sum(softmax_array), 1.0, abs_tol=1e-5), \
            "Sum of softmax values equals " + str(sum(softmax_array)) + " instead of 1.0"

        # indices of the top_n largest probabilities, largest first
        argmax = softmax_array.argsort()[-top_n:][::-1]

        for i, argmax_value in enumerate(argmax):
            # label lines look like "<index>:<description>"
            predicted_class = labels[argmax_value]
            predicted_class_index = predicted_class.split(":")[0]
            predicted_class_description = predicted_class.split(":")[1].replace("\n", "")

            print("({0}) {1:6.3f} % {2} [{3}]".format(i + 1,
                                                      100 * softmax_array[argmax_value],
                                                      predicted_class_description,
                                                      predicted_class_index))
        print()


class ResultPrinter:
    """Writes per-trial model responses to per-session CSV files.

    One CSV file is created per (dataset, model, session); each row records
    a single trial in the raw-data format used elsewhere in the project
    (subj, session, trial, rt, object_response, category, condition,
    imagename).
    """

    def __init__(self, model_name, dataset,
                 data_parent_dir=c.RAW_DATA_DIR):

        self.model_name = model_name
        self.dataset = dataset
        self.data_dir = pjoin(data_parent_dir, dataset.name)
        self.decision_mapping = self.dataset.decision_mapping
        self.info_mapping = self.dataset.info_mapping
        # session numbers for which a CSV file has already been created
        self.session_list = []

    def create_session_csv(self, session):
        """Create (or silently overwrite) the CSV for `session`; write header."""

        self.csv_file_path = pjoin(self.data_dir,
                                   self.dataset.name + "_" +
                                   self.model_name.replace("_", "-") + "_" +
                                   session + ".csv")

        if os.path.exists(self.csv_file_path):
            os.remove(self.csv_file_path)

        os.makedirs(os.path.dirname(self.csv_file_path), exist_ok=True)

        # trial counter, shared across sessions of this printer
        self.index = 0

        # write csv file header row; newline='' as required by the csv module
        # (avoids blank lines on Windows)
        with open(self.csv_file_path, "w", newline='') as f:
            writer = csv.writer(f)
            writer.writerow(["subj", "session", "trial",
                             "rt", "object_response", "category",
                             "condition", "imagename"])

    def print_batch_to_csv(self, object_response,
                           batch_targets, paths):
        """Append one CSV row per (response, target, path) triple of a batch."""

        for response, target, path in zip(object_response, batch_targets, paths):

            session_name, img_name, condition, category = self.info_mapping(path)
            session_num = int(session_name.split("-")[-1])

            # start a new CSV the first time a session number is seen
            if session_num not in self.session_list:
                self.session_list.append(session_num)
                self.create_session_csv(session_name)

            with open(self.csv_file_path, "a", newline='') as f:
                writer = csv.writer(f)
                writer.writerow([self.model_name,
                                 str(session_num), str(self.index + 1),
                                 "NaN", response[0], category,
                                 condition, img_name])
            self.index += 1


================================================
FILE: modelvshuman/evaluation/imagenet_labels.txt
================================================
0:background
1:tench, Tinca tinca
2:goldfish, Carassius auratus
3:great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias
4:tiger shark, Galeocerdo cuvieri
5:hammerhead, hammerhead shark
6:electric ray, crampfish, numbfish, torpedo
7:stingray
8:cock
9:hen
10:ostrich, Struthio camelus
11:brambling, Fringilla montifringilla
12:goldfinch, Carduelis carduelis
13:house finch, linnet, Carpodacus mexicanus
14:junco, snowbird
15:indigo bunting, indigo finch, indigo bird, Passerina cyanea
16:robin, American robin, Turdus migratorius
17:bulbul
18:jay
19:magpie
20:chickadee
21:water ouzel, dipper
22:kite
23:bald eagle, American eagle, Haliaeetus leucocephalus
24:vulture
25:great grey owl, great gray owl, Strix nebulosa
26:European fire salamander, Salamandra salamandra
27:common newt, Triturus vulgaris
28:eft
29:spotted salamander, Ambystoma maculatum
30:axolotl, mud puppy, Ambystoma mexicanum
31:bullfrog, Rana catesbeiana
32:tree frog, tree-frog
33:tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui
34:loggerhead, loggerhead turtle, Caretta caretta
35:leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea
36:mud turtle
37:terrapin
38:box turtle, box tortoise
39:banded gecko
40:common iguana, iguana, Iguana iguana
41:American chameleon, anole, Anolis carolinensis
42:whiptail, whiptail lizard
43:agama
44:frilled lizard, Chlamydosaurus kingi
45:alligator lizard
46:Gila monster, Heloderma suspectum
47:green lizard, Lacerta viridis
48:African chameleon, Chamaeleo chamaeleon
49:Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis
50:African crocodile, Nile crocodile, Crocodylus niloticus
51:American alligator, Alligator mississipiensis
52:triceratops
53:thunder snake, worm snake, Carphophis amoenus
54:ringneck snake, ring-necked snake, ring snake
55:hognose snake, puff adder, sand viper
56:green snake, grass snake
57:king snake, kingsnake
58:garter snake, grass snake
59:water snake
60:vine snake
61:night snake, Hypsiglena torquata
62:boa constrictor, Constrictor constrictor
63:rock python, rock snake, Python sebae
64:Indian cobra, Naja naja
65:green mamba
66:sea snake
67:horned viper, cerastes, sand viper, horned asp, Cerastes cornutus
68:diamondback, diamondback rattlesnake, Crotalus adamanteus
69:sidewinder, horned rattlesnake, Crotalus cerastes
70:trilobite
71:harvestman, daddy longlegs, Phalangium opilio
72:scorpion
73:black and gold garden spider, Argiope aurantia
74:barn spider, Araneus cavaticus
75:garden spider, Aranea diademata
76:black widow, Latrodectus mactans
77:tarantula
78:wolf spider, hunting spider
79:tick
80:centipede
81:black grouse
82:ptarmigan
83:ruffed grouse, partridge, Bonasa umbellus
84:prairie chicken, prairie grouse, prairie fowl
85:peacock
86:quail
87:partridge
88:African grey, African gray, Psittacus erithacus
89:macaw
90:sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita
91:lorikeet
92:coucal
93:bee eater
94:hornbill
95:hummingbird
96:jacamar
97:toucan
98:drake
99:red-breasted merganser, Mergus serrator
100:goose
101:black swan, Cygnus atratus
102:tusker
103:echidna, spiny anteater, anteater
104:platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus
105:wallaby, brush kangaroo
106:koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus
107:wombat
108:jellyfish
109:sea anemone, anemone
110:brain coral
111:flatworm, platyhelminth
112:nematode, nematode worm, roundworm
113:conch
114:snail
115:slug
116:sea slug, nudibranch
117:chiton, coat-of-mail shell, sea cradle, polyplacophore
118:chambered nautilus, pearly nautilus, nautilus
119:Dungeness crab, Cancer magister
120:rock crab, Cancer irroratus
121:fiddler crab
122:king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica
123:American lobster, Northern lobster, Maine lobster, Homarus americanus
124:spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish
125:crayfish, crawfish, crawdad, crawdaddy
126:hermit crab
127:isopod
128:white stork, Ciconia ciconia
129:black stork, Ciconia nigra
130:spoonbill
131:flamingo
132:little blue heron, Egretta caerulea
133:American egret, great white heron, Egretta albus
134:bittern
135:crane
136:limpkin, Aramus pictus
137:European gallinule, Porphyrio porphyrio
138:American coot, marsh hen, mud hen, water hen, Fulica americana
139:bustard
140:ruddy turnstone, Arenaria interpres
141:red-backed sandpiper, dunlin, Erolia alpina
142:redshank, Tringa totanus
143:dowitcher
144:oystercatcher, oyster catcher
145:pelican
146:king penguin, Aptenodytes patagonica
147:albatross, mollymawk
148:grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus
149:killer whale, killer, orca, grampus, sea wolf, Orcinus orca
150:dugong, Dugong dugon
151:sea lion
152:Chihuahua
153:Japanese spaniel
154:Maltese dog, Maltese terrier, Maltese
155:Pekinese, Pekingese, Peke
156:Shih-Tzu
157:Blenheim spaniel
158:papillon
159:toy terrier
160:Rhodesian ridgeback
161:Afghan hound, Afghan
162:basset, basset hound
163:beagle
164:bloodhound, sleuthhound
165:bluetick
166:black-and-tan coonhound
167:Walker hound, Walker foxhound
168:English foxhound
169:redbone
170:borzoi, Russian wolfhound
171:Irish wolfhound
172:Italian greyhound
173:whippet
174:Ibizan hound, Ibizan Podenco
175:Norwegian elkhound, elkhound
176:otterhound, otter hound
177:Saluki, gazelle hound
178:Scottish deerhound, deerhound
179:Weimaraner
180:Staffordshire bullterrier, Staffordshire bull terrier
181:American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier
182:Bedlington terrier
183:Border terrier
184:Kerry blue terrier
185:Irish terrier
186:Norfolk terrier
187:Norwich terrier
188:Yorkshire terrier
189:wire-haired fox terrier
190:Lakeland terrier
191:Sealyham terrier, Sealyham
192:Airedale, Airedale terrier
193:cairn, cairn terrier
194:Australian terrier
195:Dandie Dinmont, Dandie Dinmont terrier
196:Boston bull, Boston terrier
197:miniature schnauzer
198:giant schnauzer
199:standard schnauzer
200:Scotch terrier, Scottish terrier, Scottie
201:Tibetan terrier, chrysanthemum dog
202:silky terrier, Sydney silky
203:soft-coated wheaten terrier
204:West Highland white terrier
205:Lhasa, Lhasa apso
206:flat-coated retriever
207:curly-coated retriever
208:golden retriever
209:Labrador retriever
210:Chesapeake Bay retriever
211:German short-haired pointer
212:vizsla, Hungarian pointer
213:English setter
214:Irish setter, red setter
215:Gordon setter
216:Brittany spaniel
217:clumber, clumber spaniel
218:English springer, English springer spaniel
219:Welsh springer spaniel
220:cocker spaniel, English cocker spaniel, cocker
221:Sussex spaniel
222:Irish water spaniel
223:kuvasz
224:schipperke
225:groenendael
226:malinois
227:briard
228:kelpie
229:komondor
230:Old English sheepdog, bobtail
231:Shetland sheepdog, Shetland sheep dog, Shetland
232:collie
233:Border collie
234:Bouvier des Flandres, Bouviers des Flandres
235:Rottweiler
236:German shepherd, German shepherd dog, German police dog, alsatian
237:Doberman, Doberman pinscher
238:miniature pinscher
239:Greater Swiss Mountain dog
240:Bernese mountain dog
241:Appenzeller
242:EntleBucher
243:boxer
244:bull mastiff
245:Tibetan mastiff
246:French bulldog
247:Great Dane
248:Saint Bernard, St Bernard
249:Eskimo dog, husky
250:malamute, malemute, Alaskan malamute
251:Siberian husky
252:dalmatian, coach dog, carriage dog
253:affenpinscher, monkey pinscher, monkey dog
254:basenji
255:pug, pug-dog
256:Leonberg
257:Newfoundland, Newfoundland dog
258:Great Pyrenees
259:Samoyed, Samoyede
260:Pomeranian
261:chow, chow chow
262:keeshond
263:Brabancon griffon
264:Pembroke, Pembroke Welsh corgi
265:Cardigan, Cardigan Welsh corgi
266:toy poodle
267:miniature poodle
268:standard poodle
269:Mexican hairless
270:timber wolf, grey wolf, gray wolf, Canis lupus
271:white wolf, Arctic wolf, Canis lupus tundrarum
272:red wolf, maned wolf, Canis rufus, Canis niger
273:coyote, prairie wolf, brush wolf, Canis latrans
274:dingo, warrigal, warragal, Canis dingo
275:dhole, Cuon alpinus
276:African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus
277:hyena, hyaena
278:red fox, Vulpes vulpes
279:kit fox, Vulpes macrotis
280:Arctic fox, white fox, Alopex lagopus
281:grey fox, gray fox, Urocyon cinereoargenteus
282:tabby, tabby cat
283:tiger cat
284:Persian cat
285:Siamese cat, Siamese
286:Egyptian cat
287:cougar, puma, catamount, mountain lion, painter, panther, Felis concolor
288:lynx, catamount
289:leopard, Panthera pardus
290:snow leopard, ounce, Panthera uncia
291:jaguar, panther, Panthera onca, Felis onca
292:lion, king of beasts, Panthera leo
293:tiger, Panthera tigris
294:cheetah, chetah, Acinonyx jubatus
295:brown bear, bruin, Ursus arctos
296:American black bear, black bear, Ursus americanus, Euarctos americanus
297:ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus
298:sloth bear, Melursus ursinus, Ursus ursinus
299:mongoose
300:meerkat, mierkat
301:tiger beetle
302:ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle
303:ground beetle, carabid beetle
304:long-horned beetle, longicorn, longicorn beetle
305:leaf beetle, chrysomelid
306:dung beetle
307:rhinoceros beetle
308:weevil
309:fly
310:bee
311:ant, emmet, pismire
312:grasshopper, hopper
313:cricket
314:walking stick, walkingstick, stick insect
315:cockroach, roach
316:mantis, mantid
317:cicada, cicala
318:leafhopper
319:lacewing, lacewing fly
320:dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk
321:damselfly
322:admiral
323:ringlet, ringlet butterfly
324:monarch, monarch butterfly, milkweed butterfly, Danaus plexippus
325:cabbage butterfly
326:sulphur butterfly, sulfur butterfly
327:lycaenid, lycaenid butterfly
328:starfish, sea star
329:sea urchin
330:sea cucumber, holothurian
331:wood rabbit, cottontail, cottontail rabbit
332:hare
333:Angora, Angora rabbit
334:hamster
335:porcupine, hedgehog
336:fox squirrel, eastern fox squirrel, Sciurus niger
337:marmot
338:beaver
339:guinea pig, Cavia cobaya
340:sorrel
341:zebra
342:hog, pig, grunter, squealer, Sus scrofa
343:wild boar, boar, Sus scrofa
344:warthog
345:hippopotamus, hippo, river horse, Hippopotamus amphibius
346:ox
347:water buffalo, water ox, Asiatic buffalo, Bubalus bubalis
348:bison
349:ram, tup
350:bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis
351:ibex, Capra ibex
352:hartebeest
353:impala, Aepyceros melampus
354:gazelle
355:Arabian camel, dromedary, Camelus dromedarius
356:llama
357:weasel
358:mink
359:polecat, fitch, foulmart, foumart, Mustela putorius
360:black-footed ferret, ferret, Mustela nigripes
361:otter
362:skunk, polecat, wood pussy
363:badger
364:armadillo
365:three-toed sloth, ai, Bradypus tridactylus
366:orangutan, orang, orangutang, Pongo pygmaeus
367:gorilla, Gorilla gorilla
368:chimpanzee, chimp, Pan troglodytes
369:gibbon, Hylobates lar
370:siamang, Hylobates syndactylus, Symphalangus syndactylus
371:guenon, guenon monkey
372:patas, hussar monkey, Erythrocebus patas
373:baboon
374:macaque
375:langur
376:colobus, colobus monkey
377:proboscis monkey, Nasalis larvatus
378:marmoset
379:capuchin, ringtail, Cebus capucinus
380:howler monkey, howler
381:titi, titi monkey
382:spider monkey, Ateles geoffroyi
383:squirrel monkey, Saimiri sciureus
384:Madagascar cat, ring-tailed lemur, Lemur catta
385:indri, indris, Indri indri, Indri brevicaudatus
386:Indian elephant, Elephas maximus
387:African elephant, Loxodonta africana
388:lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens
389:giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca
390:barracouta, snoek
391:eel
392:coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch
393:rock beauty, Holocanthus tricolor
394:anemone fish
395:sturgeon
396:gar, garfish, garpike, billfish, Lepisosteus osseus
397:lionfish
398:puffer, pufferfish, blowfish, globefish
399:abacus
400:abaya
401:academic gown, academic robe, judge's robe
402:accordion, piano accordion, squeeze box
403:acoustic guitar
404:aircraft carrier, carrier, flattop, attack aircraft carrier
405:airliner
406:airship, dirigible
407:altar
408:ambulance
409:amphibian, amphibious vehicle
410:analog clock
411:apiary, bee house
412:apron
413:ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin
414:assault rifle, assault gun
415:backpack, back pack, knapsack, packsack, rucksack, haversack
416:bakery, bakeshop, bakehouse
417:balance beam, beam
418:balloon
419:ballpoint, ballpoint pen, ballpen, Biro
420:Band Aid
421:banjo
422:bannister, banister, balustrade, balusters, handrail
423:barbell
424:barber chair
425:barbershop
426:barn
427:barometer
428:barrel, cask
429:barrow, garden cart, lawn cart, wheelbarrow
430:baseball
431:basketball
432:bassinet
433:bassoon
434:bathing cap, swimming cap
435:bath towel
436:bathtub, bathing tub, bath, tub
437:beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon
438:beacon, lighthouse, beacon light, pharos
439:beaker
440:bearskin, busby, shako
441:beer bottle
442:beer glass
443:bell cote, bell cot
444:bib
445:bicycle-built-for-two, tandem bicycle, tandem
446:bikini, two-piece
447:binder, ring-binder
448:binoculars, field glasses, opera glasses
449:birdhouse
450:boathouse
451:bobsled, bobsleigh, bob
452:bolo tie, bolo, bola tie, bola
453:bonnet, poke bonnet
454:bookcase
455:bookshop, bookstore, bookstall
456:bottlecap
457:bow
458:bow tie, bow-tie, bowtie
459:brass, memorial tablet, plaque
460:brassiere, bra, bandeau
461:breakwater, groin, groyne, mole, bulwark, seawall, jetty
462:breastplate, aegis, egis
463:broom
464:bucket, pail
465:buckle
466:bulletproof vest
467:bullet train, bullet
468:butcher shop, meat market
469:cab, hack, taxi, taxicab
470:caldron, cauldron
471:candle, taper, wax light
472:cannon
473:canoe
474:can opener, tin opener
475:cardigan
476:car mirror
477:carousel, carrousel, merry-go-round, roundabout, whirligig
478:carpenter's kit, tool kit
479:carton
480:car wheel
481:cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM
482:cassette
483:cassette player
484:castle
485:catamaran
486:CD player
487:cello, violoncello
488:cellular telephone, cellular phone, cellphone, cell, mobile phone
489:chain
490:chainlink fence
491:chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour
492:chain saw, chainsaw
493:chest
494:chiffonier, commode
495:chime, bell, gong
496:china cabinet, china closet
497:Christmas stocking
498:church, church building
499:cinema, movie theater, movie theatre, movie house, picture palace
500:cleaver, meat cleaver, chopper
501:cliff dwelling
502:cloak
503:clog, geta, patten, sabot
504:cocktail shaker
505:coffee mug
506:coffeepot
507:coil, spiral, volute, whorl, helix
508:combination lock
509:computer keyboard, keypad
510:confectionery, confectionary, candy store
511:container ship, containership, container vessel
512:convertible
513:corkscrew, bottle screw
514:cornet, horn, trumpet, trump
515:cowboy boot
516:cowboy hat, ten-gallon hat
517:cradle
518:crane
519:crash helmet
520:crate
521:crib, cot
522:Crock Pot
523:croquet ball
524:crutch
525:cuirass
526:dam, dike, dyke
527:desk
528:desktop computer
529:dial telephone, dial phone
530:diaper, nappy, napkin
531:digital clock
532:digital watch
533:dining table, board
534:dishrag, dishcloth
535:dishwasher, dish washer, dishwashing machine
536:disk brake, disc brake
537:dock, dockage, docking facility
538:dogsled, dog sled, dog sleigh
539:dome
540:doormat, welcome mat
541:drilling platform, offshore rig
542:drum, membranophone, tympan
543:drumstick
544:dumbbell
545:Dutch oven
546:electric fan, blower
547:electric guitar
548:electric locomotive
549:entertainment center
550:envelope
551:espresso maker
552:face powder
553:feather boa, boa
554:file, file cabinet, filing cabinet
555:fireboat
556:fire engine, fire truck
557:fire screen, fireguard
558:flagpole, flagstaff
559:flute, transverse flute
560:folding chair
561:football helmet
562:forklift
563:fountain
564:fountain pen
565:four-poster
566:freight car
567:French horn, horn
568:frying pan, frypan, skillet
569:fur coat
570:garbage truck, dustcart
571:gasmask, respirator, gas helmet
572:gas pump, gasoline pump, petrol pump, island dispenser
573:goblet
574:go-kart
575:golf ball
576:golfcart, golf cart
577:gondola
578:gong, tam-tam
579:gown
580:grand piano, grand
581:greenhouse, nursery, glasshouse
582:grille, radiator grille
583:grocery store, grocery, food market, market
584:guillotine
585:hair slide
586:hair spray
587:half track
588:hammer
589:hamper
590:hand blower, blow dryer, blow drier, hair dryer, hair drier
591:hand-held computer, hand-held microcomputer
592:handkerchief, hankie, hanky, hankey
593:hard disc, hard disk, fixed disk
594:harmonica, mouth organ, harp, mouth harp
595:harp
596:harvester, reaper
597:hatchet
598:holster
599:home theater, home theatre
600:honeycomb
601:hook, claw
602:hoopskirt, crinoline
603:horizontal bar, high bar
604:horse cart, horse-cart
605:hourglass
606:iPod
607:iron, smoothing iron
608:jack-o'-lantern
609:jean, blue jean, denim
610:jeep, landrover
611:jersey, T-shirt, tee shirt
612:jigsaw puzzle
613:jinrikisha, ricksha, rickshaw
614:joystick
615:kimono
616:knee pad
617:knot
618:lab coat, laboratory coat
619:ladle
620:lampshade, lamp shade
621:laptop, laptop computer
622:lawn mower, mower
623:lens cap, lens cover
624:letter opener, paper knife, paperknife
625:library
626:lifeboat
627:lighter, light, igniter, ignitor
628:limousine, limo
629:liner, ocean liner
630:lipstick, lip rouge
631:Loafer
632:lotion
633:loudspeaker, speaker, speaker unit, loudspeaker system, speaker system
634:loupe, jeweler's loupe
635:lumbermill, sawmill
636:magnetic compass
637:mailbag, postbag
638:mailbox, letter box
639:maillot
640:maillot, tank suit
641:manhole cover
642:maraca
643:marimba, xylophone
644:mask
645:matchstick
646:maypole
647:maze, labyrinth
648:measuring cup
649:medicine chest, medicine cabinet
650:megalith, megalithic structure
651:microphone, mike
652:microwave, microwave oven
653:military uniform
654:milk can
655:minibus
656:miniskirt, mini
657:minivan
658:missile
659:mitten
660:mixing bowl
661:mobile home, manufactured home
662:Model T
663:modem
664:monastery
665:monitor
666:moped
667:mortar
668:mortarboard
669:mosque
670:mosquito net
671:motor scooter, scooter
672:mountain bike, all-terrain bike, off-roader
673:mountain tent
674:mouse, computer mouse
675:mousetrap
676:moving van
677:muzzle
678:nail
679:neck brace
680:necklace
681:nipple
682:notebook, notebook computer
683:obelisk
684:oboe, hautboy, hautbois
685:ocarina, sweet potato
686:odometer, hodometer, mileometer, milometer
687:oil filter
688:organ, pipe organ
689:oscilloscope, scope, cathode-ray oscilloscope, CRO
690:overskirt
691:oxcart
692:oxygen mask
693:packet
694:paddle, boat paddle
695:paddlewheel, paddle wheel
696:padlock
697:paintbrush
698:pajama, pyjama, pj's, jammies
699:palace
700:panpipe, pandean pipe, syrinx
701:paper towel
702:parachute, chute
703:parallel bars, bars
704:park bench
705:parking meter
706:passenger car, coach, carriage
707:patio, terrace
708:pay-phone, pay-station
709:pedestal, plinth, footstall
710:pencil box, pencil case
711:pencil sharpener
712:perfume, essence
713:Petri dish
714:photocopier
715:pick, plectrum, plectron
716:pickelhaube
717:picket fence, paling
718:pickup, pickup truck
719:pier
720:piggy bank, penny bank
721:pill bottle
722:pillow
723:ping-pong ball
724:pinwheel
725:pirate, pirate ship
726:pitcher, ewer
727:plane, carpenter's plane, woodworking plane
728:planetarium
729:plastic bag
730:plate rack
731:plow, plough
732:plunger, plumber's helper
733:Polaroid camera, Polaroid Land camera
734:pole
735:police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria
736:poncho
737:pool table, billiard table, snooker table
738:pop bottle, soda bottle
739:pot, flowerpot
740:potter's wheel
741:power drill
742:prayer rug, prayer mat
743:printer
744:prison, prison house
745:projectile, missile
746:projector
747:puck, hockey puck
748:punching bag, punch bag, punching ball, punchball
749:purse
750:quill, quill pen
751:quilt, comforter, comfort, puff
752:racer, race car, racing car
753:racket, racquet
754:radiator
755:radio, wireless
756:radio telescope, radio reflector
757:rain barrel
758:recreational vehicle, RV, R.V.
759:reel
760:reflex camera
761:refrigerator, icebox
762:remote control, remote
763:restaurant, eating house, eating place, eatery
764:revolver, six-gun, six-shooter
765:rifle
766:rocking chair, rocker
767:rotisserie
768:rubber eraser, rubber, pencil eraser
769:rugby ball
770:rule, ruler
771:running shoe
772:safe
773:safety pin
774:saltshaker, salt shaker
775:sandal
776:sarong
777:sax, saxophone
778:scabbard
779:scale, weighing machine
780:school bus
781:schooner
782:scoreboard
783:screen, CRT screen
784:screw
785:screwdriver
786:seat belt, seatbelt
787:sewing machine
788:shield, buckler
789:shoe shop, shoe-shop, shoe store
790:shoji
791:shopping basket
792:shopping cart
793:shovel
794:shower cap
795:shower curtain
796:ski
797:ski mask
798:sleeping bag
799:slide rule, slipstick
800:sliding door
801:slot, one-armed bandit
802:snorkel
803:snowmobile
804:snowplow, snowplough
805:soap dispenser
806:soccer ball
807:sock
808:solar dish, solar collector, solar furnace
809:sombrero
810:soup bowl
811:space bar
812:space heater
813:space shuttle
814:spatula
815:speedboat
816:spider web, spider's web
817:spindle
818:sports car, sport car
819:spotlight, spot
820:stage
821:steam locomotive
822:steel arch bridge
823:steel drum
824:stethoscope
825:stole
826:stone wall
827:stopwatch, stop watch
828:stove
829:strainer
830:streetcar, tram, tramcar, trolley, trolley car
831:stretcher
832:studio couch, day bed
833:stupa, tope
834:submarine, pigboat, sub, U-boat
835:suit, suit of clothes
836:sundial
837:sunglass
838:sunglasses, dark glasses, shades
839:sunscreen, sunblock, sun blocker
840:suspension bridge
841:swab, swob, mop
842:sweatshirt
843:swimming trunks, bathing trunks
844:swing
845:switch, electric switch, electrical switch
846:syringe
847:table lamp
848:tank, army tank, armored combat vehicle, armoured combat vehicle
849:tape player
850:teapot
851:teddy, teddy bear
852:television, television system
853:tennis ball
854:thatch, thatched roof
855:theater curtain, theatre curtain
856:thimble
857:thresher, thrasher, threshing machine
858:throne
859:tile roof
860:toaster
861:tobacco shop, tobacconist shop, tobacconist
862:toilet seat
863:torch
864:totem pole
865:tow truck, tow car, wrecker
866:toyshop
867:tractor
868:trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi
869:tray
870:trench coat
871:tricycle, trike, velocipede
872:trimaran
873:tripod
874:triumphal arch
875:trolleybus, trolley coach, trackless trolley
876:trombone
877:tub, vat
878:turnstile
879:typewriter keyboard
880:umbrella
881:unicycle, monocycle
882:upright, upright piano
883:vacuum, vacuum cleaner
884:vase
885:vault
886:velvet
887:vending machine
888:vestment
889:viaduct
890:violin, fiddle
891:volleyball
892:waffle iron
893:wall clock
894:wallet, billfold, notecase, pocketbook
895:wardrobe, closet, press
896:warplane, military plane
897:washbasin, handbasin, washbowl, lavabo, wash-hand basin
898:washer, automatic washer, washing machine
899:water bottle
900:water jug
901:water tower
902:whiskey jug
903:whistle
904:wig
905:window screen
906:window shade
907:Windsor tie
908:wine bottle
909:wing
910:wok
911:wooden spoon
912:wool, woolen, woollen
913:worm fence, snake fence, snake-rail fence, Virginia fence
914:wreck
915:yawl
916:yurt
917:web site, website, internet site, site
918:comic book
919:crossword puzzle, crossword
920:street sign
921:traffic light, traffic signal, stoplight
922:book jacket, dust cover, dust jacket, dust wrapper
923:menu
924:plate
925:guacamole
926:consomme
927:hot pot, hotpot
928:trifle
929:ice cream, icecream
930:ice lolly, lolly, lollipop, popsicle
931:French loaf
932:bagel, beigel
933:pretzel
934:cheeseburger
935:hotdog, hot dog, red hot
936:mashed potato
937:head cabbage
938:broccoli
939:cauliflower
940:zucchini, courgette
941:spaghetti squash
942:acorn squash
943:butternut squash
944:cucumber, cuke
945:artichoke, globe artichoke
946:bell pepper
947:cardoon
948:mushroom
949:Granny Smith
950:strawberry
951:orange
952:lemon
953:fig
954:pineapple, ananas
955:banana
956:jackfruit, jak, jack
957:custard apple
958:pomegranate
959:hay
960:carbonara
961:chocolate sauce, chocolate syrup
962:dough
963:meat loaf, meatloaf
964:pizza, pizza pie
965:potpie
966:burrito
967:red wine
968:espresso
969:cup
970:eggnog
971:alp
972:bubble
973:cliff, drop, drop-off
974:coral reef
975:geyser
976:lakeside, lakeshore
977:promontory, headland, head, foreland
978:sandbar, sand bar
979:seashore, coast, seacoast, sea-coast
980:valley, vale
981:volcano
982:ballplayer, baseball player
983:groom, bridegroom
984:scuba diver
985:rapeseed
986:daisy
987:yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum
988:corn
989:acorn
990:hip, rose hip, rosehip
991:buckeye, horse chestnut, conker
992:coral fungus
993:agaric
994:gyromitra
995:stinkhorn, carrion fungus
996:earthstar
997:hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa
998:bolete
999:ear, spike, capitulum
1000:toilet tissue, toilet paper, bathroom tissue


================================================
FILE: modelvshuman/evaluation/metrics.py
================================================
"""
Generic evaluation functionality: evaluate on several datasets.
"""
from abc import ABC, abstractmethod

from .. import datasets
from ..helper import human_categories as hc
import numpy as np
import torch
import copy
from .. import constants as c
from ..datasets import info_mappings


class Metric(ABC):
    """Abstract base class for evaluation metrics accumulated over batches.

    Subclasses provide three pieces: ``update`` (fold one batch of
    predictions into the running state), ``reset`` (clear that state) and
    the ``value`` property (current metric value). ``__init__`` calls
    ``reset`` so every metric starts from a clean accumulator.
    """

    def __init__(self, name):
        # Human-readable metric name; used by __str__.
        self.name = name
        # Initialise accumulator state via the subclass hook.
        self.reset()

    def check_input(self, output, target, assert_ndarray=True):
        """Sanity-check raw model output (and optionally targets).

        ``output`` must be a 2-D numpy array (batch x classes). When
        ``assert_ndarray`` is set, ``target`` must be a numpy array whose
        leading dimension matches the batch size of ``output``.
        """
        assert type(output) is np.ndarray
        assert output.ndim == 2, f"output needs to have len(output.shape) == 2 instead of {output.ndim}"

        if assert_ndarray:
            assert type(target) is np.ndarray
            assert output.shape[0] == target.shape[0]

    @abstractmethod
    def update(self, predictions, targets, paths):
        """Fold one batch of results into the running metric state."""
        pass

    @abstractmethod
    def reset(self):
        """Clear all accumulated state."""
        pass

    @property
    @abstractmethod
    def value(self):
        """Current value of the metric over everything seen so far."""
        pass

    def __str__(self):
        return f"{self.name}: {self.value}"


class Accuracy(Metric):
    """Top-k accuracy: fraction of samples whose target class appears
    among the first ``topk`` entries of the (ranked) prediction list."""

    def __init__(self, name=None, topk=1):
        # Derive a default display name from k only when none was given.
        if name is None:
            name = f"accuracy (top-{topk})"
        super(Accuracy, self).__init__(name)
        self.topk = topk

    def reset(self):
        # Running count of correct samples and of all samples seen.
        self._sum = 0
        self._count = 0

    def update(self, predictions, targets, paths):
        # A sample is correct when its target label occurs within the
        # first topk ranked predictions for that sample.
        k = self.topk
        hits = []
        for label, ranked in zip(targets, predictions):
            hits.append(label in ranked[:k])
        self._sum += np.sum(hits)
        self._count += len(predictions)

    @property
    def value(self):
        # Guard against division by zero before any batch was seen.
        return 0 if self._count == 0 else self._sum / self._count

    def __str__(self):
        return f"{self.name:s}: {self.value * 100:3.2f}"



================================================
FILE: modelvshuman/helper/__init__.py
================================================


================================================
FILE: modelvshuman/helper/categories.txt
================================================
n01440764 tench, Tinca tinca
n01443537 goldfish, Carassius auratus
n01484850 great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias
n01491361 tiger shark, Galeocerdo cuvieri
n01494475 hammerhead, hammerhead shark
n01496331 electric ray, crampfish, numbfish, torpedo
n01498041 stingray
n01514668 cock
n01514859 hen
n01518878 ostrich, Struthio camelus
n01530575 brambling, Fringilla montifringilla
n01531178 goldfinch, Carduelis carduelis
n01532829 house finch, linnet, Carpodacus mexicanus
n01534433 junco, snowbird
n01537544 indigo bunting, indigo finch, indigo bird, Passerina cyanea
n01558993 robin, American robin, Turdus migratorius
n01560419 bulbul
n01580077 jay
n01582220 magpie
n01592084 chickadee
n01601694 water ouzel, dipper
n01608432 kite
n01614925 bald eagle, American eagle, Haliaeetus leucocephalus
n01616318 vulture
n01622779 great grey owl, great gray owl, Strix nebulosa
n01629819 European fire salamander, Salamandra salamandra
n01630670 common newt, Triturus vulgaris
n01631663 eft
n01632458 spotted salamander, Ambystoma maculatum
n01632777 axolotl, mud puppy, Ambystoma mexicanum
n01641577 bullfrog, Rana catesbeiana
n01644373 tree frog, tree-frog
n01644900 tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui
n01664065 loggerhead, loggerhead turtle, Caretta caretta
n01665541 leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea
n01667114 mud turtle
n01667778 terrapin
n01669191 box turtle, box tortoise
n01675722 banded gecko
n01677366 common iguana, iguana, Iguana iguana
n01682714 American chameleon, anole, Anolis carolinensis
Download .txt
gitextract_kib475aa/

├── .gitignore
├── README.md
├── examples/
│   ├── evaluate.py
│   └── plotting_definition.py
├── latex-report/
│   ├── assets/
│   │   ├── benchmark_figures.tex
│   │   ├── benchmark_table_accuracy.tex
│   │   ├── benchmark_table_humanlike.tex
│   │   ├── benchmark_tables.tex
│   │   ├── consistency_vs_accuracy.tex
│   │   ├── error_consistency_lineplots.tex
│   │   ├── error_consistency_matrices.tex
│   │   ├── noise_generalisation.tex
│   │   ├── nonparametric_accuracy.tex
│   │   └── shape_bias.tex
│   ├── neurips.sty
│   └── report.tex
├── licenses/
│   ├── CODE_LICENSE
│   ├── LICENSES_OVERVIEW.md
│   └── MODEL_LICENSES
├── modelvshuman/
│   ├── __init__.py
│   ├── cli.py
│   ├── constants.py
│   ├── datasets/
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── create_dataset.py
│   │   ├── dataloaders.py
│   │   ├── dataset_converters.py
│   │   ├── decision_mappings.py
│   │   ├── experiments.py
│   │   ├── imagenet.py
│   │   ├── info_mappings.py
│   │   ├── noise_generalisation.py
│   │   ├── registry.py
│   │   ├── sketch.py
│   │   ├── stylized.py
│   │   └── texture_shape.py
│   ├── evaluation/
│   │   ├── __init__.py
│   │   ├── evaluate.py
│   │   ├── imagenet_labels.txt
│   │   └── metrics.py
│   ├── helper/
│   │   ├── __init__.py
│   │   ├── categories.txt
│   │   ├── human_categories.py
│   │   ├── plotting_helper.py
│   │   └── wordnet_functions.py
│   ├── model_evaluator.py
│   ├── models/
│   │   ├── __init__.py
│   │   ├── pytorch/
│   │   │   ├── __init__.py
│   │   │   ├── adversarially_robust/
│   │   │   │   ├── __init__.py
│   │   │   │   └── robust_models.py
│   │   │   ├── bagnets/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── kerasnet.py
│   │   │   │   └── pytorchnet.py
│   │   │   ├── clip/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── imagenet_classes.py
│   │   │   │   └── imagenet_templates.py
│   │   │   ├── model_zoo.py
│   │   │   ├── pycontrast/
│   │   │   │   ├── __init__.py
│   │   │   │   └── pycontrast_resnet50.py
│   │   │   ├── shapenet/
│   │   │   │   ├── __init__.py
│   │   │   │   └── texture_shape_models.py
│   │   │   └── simclr/
│   │   │       ├── __init__.py
│   │   │       ├── cores/
│   │   │       │   ├── __init__.py
│   │   │       │   └── cores.py
│   │   │       ├── utils/
│   │   │       │   ├── __init__.py
│   │   │       │   ├── gdrive.py
│   │   │       │   ├── mlayer.py
│   │   │       │   └── modules.py
│   │   │       └── zoo/
│   │   │           ├── __init__.py
│   │   │           └── simclr.py
│   │   ├── registry.py
│   │   ├── tensorflow/
│   │   │   ├── __init__.py
│   │   │   ├── build_model.py
│   │   │   ├── model_zoo.py
│   │   │   └── tf_hub_model_url.py
│   │   └── wrappers/
│   │       ├── __init__.py
│   │       ├── base.py
│   │       ├── pytorch.py
│   │       └── tensorflow.py
│   ├── plotting/
│   │   ├── __init__.py
│   │   ├── analyses.py
│   │   ├── colors.py
│   │   ├── decision_makers.py
│   │   └── plot.py
│   ├── utils.py
│   └── version.py
├── raw-data/
│   ├── colour/
│   │   ├── colour_subject-01_session_1.csv
│   │   ├── colour_subject-02_session_1.csv
│   │   ├── colour_subject-03_session_1.csv
│   │   └── colour_subject-04_session_1.csv
│   ├── contrast/
│   │   ├── contrast_subject-01_session_1.csv
│   │   ├── contrast_subject-02_session_1.csv
│   │   ├── contrast_subject-03_session_1.csv
│   │   └── contrast_subject-04_session_1.csv
│   ├── cue-conflict/
│   │   ├── cue-conflict_subject-01_session_1.csv
│   │   ├── cue-conflict_subject-02_session_1.csv
│   │   ├── cue-conflict_subject-03_session_1.csv
│   │   ├── cue-conflict_subject-04_session_1.csv
│   │   ├── cue-conflict_subject-05_session_1.csv
│   │   ├── cue-conflict_subject-06_session_1.csv
│   │   ├── cue-conflict_subject-07_session_1.csv
│   │   ├── cue-conflict_subject-08_session_1.csv
│   │   ├── cue-conflict_subject-09_session_1.csv
│   │   └── cue-conflict_subject-10_session_1.csv
│   ├── edge/
│   │   ├── edge_subject-01_session_1.csv
│   │   ├── edge_subject-02_session_1.csv
│   │   ├── edge_subject-03_session_1.csv
│   │   ├── edge_subject-04_session_1.csv
│   │   ├── edge_subject-05_session_1.csv
│   │   ├── edge_subject-06_session_1.csv
│   │   ├── edge_subject-07_session_1.csv
│   │   ├── edge_subject-08_session_1.csv
│   │   ├── edge_subject-09_session_1.csv
│   │   └── edge_subject-10_session_1.csv
│   ├── eidolonI/
│   │   ├── eidolonI_subject-01_session_1.csv
│   │   ├── eidolonI_subject-02_session_1.csv
│   │   ├── eidolonI_subject-03_session_1.csv
│   │   └── eidolonI_subject-04_session_1.csv
│   ├── eidolonII/
│   │   ├── eidolonII_subject-01_session_1.csv
│   │   ├── eidolonII_subject-02_session_1.csv
│   │   ├── eidolonII_subject-03_session_1.csv
│   │   └── eidolonII_subject-04_session_1.csv
│   ├── eidolonIII/
│   │   ├── eidolonIII_subject-01_session_1.csv
│   │   ├── eidolonIII_subject-02_session_1.csv
│   │   ├── eidolonIII_subject-03_session_1.csv
│   │   └── eidolonIII_subject-04_session_1.csv
│   ├── false-colour/
│   │   ├── false-colour_subject-01_session_1.csv
│   │   ├── false-colour_subject-02_session_1.csv
│   │   ├── false-colour_subject-03_session_1.csv
│   │   └── false-colour_subject-04_session_1.csv
│   ├── high-pass/
│   │   ├── high-pass_subject-01_session_1.csv
│   │   ├── high-pass_subject-02_session_1.csv
│   │   ├── high-pass_subject-03_session_1.csv
│   │   └── high-pass_subject-04_session_1.csv
│   ├── low-pass/
│   │   ├── low-pass_subject-01_session_1.csv
│   │   ├── low-pass_subject-02_session_1.csv
│   │   ├── low-pass_subject-03_session_1.csv
│   │   └── low-pass_subject-04_session_1.csv
│   ├── phase-scrambling/
│   │   ├── phase-scrambling_subject-01_session_1.csv
│   │   ├── phase-scrambling_subject-02_session_1.csv
│   │   ├── phase-scrambling_subject-03_session_1.csv
│   │   └── phase-scrambling_subject-04_session_1.csv
│   ├── power-equalisation/
│   │   ├── power-equalisation_subject-01_session_1.csv
│   │   ├── power-equalisation_subject-02_session_1.csv
│   │   ├── power-equalisation_subject-03_session_1.csv
│   │   └── power-equalisation_subject-04_session_1.csv
│   ├── rotation/
│   │   ├── rotation_subject-01_session_1.csv
│   │   ├── rotation_subject-02_session_1.csv
│   │   ├── rotation_subject-03_session_1.csv
│   │   └── rotation_subject-04_session_1.csv
│   ├── silhouette/
│   │   ├── silhouette_subject-01_session_1.csv
│   │   ├── silhouette_subject-02_session_1.csv
│   │   ├── silhouette_subject-03_session_1.csv
│   │   ├── silhouette_subject-04_session_1.csv
│   │   ├── silhouette_subject-05_session_1.csv
│   │   ├── silhouette_subject-06_session_1.csv
│   │   ├── silhouette_subject-07_session_1.csv
│   │   ├── silhouette_subject-08_session_1.csv
│   │   ├── silhouette_subject-09_session_1.csv
│   │   └── silhouette_subject-10_session_1.csv
│   ├── sketch/
│   │   ├── sketch_subject-01_session_1.csv
│   │   ├── sketch_subject-02_session_1.csv
│   │   ├── sketch_subject-03_session_1.csv
│   │   ├── sketch_subject-04_session_1.csv
│   │   ├── sketch_subject-05_session_1.csv
│   │   ├── sketch_subject-06_session_1.csv
│   │   └── sketch_subject-07_session_1.csv
│   ├── stylized/
│   │   ├── stylized_subject-01_session_1.csv
│   │   ├── stylized_subject-02_session_1.csv
│   │   ├── stylized_subject-03_session_1.csv
│   │   ├── stylized_subject-04_session_1.csv
│   │   └── stylized_subject-05_session_1.csv
│   └── uniform-noise/
│       ├── uniform-noise_subject-01_session_1.csv
│       ├── uniform-noise_subject-02_session_1.csv
│       ├── uniform-noise_subject-03_session_1.csv
│       └── uniform-noise_subject-04_session_1.csv
├── setup.cfg
└── setup.py
Download .txt
SYMBOL INDEX (410 symbols across 44 files)

FILE: examples/evaluate.py
  function run_evaluation (line 9) | def run_evaluation():
  function run_plotting (line 16) | def run_plotting():

FILE: examples/plotting_definition.py
  function plotting_definition_template (line 12) | def plotting_definition_template(df):
  function get_comparison_decision_makers (line 45) | def get_comparison_decision_makers(df, include_humans=True,

FILE: modelvshuman/cli.py
  function main (line 43) | def main(models, datasets, *args, **kwargs):

FILE: modelvshuman/datasets/base.py
  class Dataset (line 7) | class Dataset(object):
    method __init__ (line 19) | def __init__(self,
    method loader (line 60) | def loader(self):
    method loader (line 66) | def loader(self, new_loader):

FILE: modelvshuman/datasets/create_dataset.py
  function resize_crop_image (line 24) | def resize_crop_image(input_file,
  function create_dataset (line 37) | def create_dataset(original_dataset_path,
  function create_experiment (line 124) | def create_experiment(expt_name,
  function get_leading_zeros (line 191) | def get_leading_zeros(num, length=4):

FILE: modelvshuman/datasets/dataloaders.py
  class ImageFolderWithPaths (line 7) | class ImageFolderWithPaths(datasets.ImageFolder):
    method __init__ (line 15) | def __init__(self, *args, **kwargs):
    method __getitem__ (line 26) | def __getitem__(self, index):
  class PytorchLoader (line 41) | class PytorchLoader(object):
    method __call__ (line 44) | def __call__(self, path, resize, batch_size, num_workers,

FILE: modelvshuman/datasets/dataset_converters.py
  class ToTensorflow (line 6) | class ToTensorflow(object):
    method __init__ (line 9) | def __init__(self, pytorch_loader):
    method convert (line 14) | def convert(self, x):
    method __iter__ (line 19) | def __iter__(self):

FILE: modelvshuman/datasets/decision_mappings.py
  class DecisionMapping (line 10) | class DecisionMapping(ABC):
    method check_input (line 11) | def check_input(self, probabilities):
    method __call__ (line 16) | def __call__(self, probabilities):
  class ImageNetProbabilitiesTo1000ClassesMapping (line 20) | class ImageNetProbabilitiesTo1000ClassesMapping(DecisionMapping):
    method __init__ (line 22) | def __init__(self):
    method __call__ (line 25) | def __call__(self, probabilities):
  class ImageNetProbabilitiesTo16ClassesMapping (line 30) | class ImageNetProbabilitiesTo16ClassesMapping(DecisionMapping):
    method __init__ (line 33) | def __init__(self, aggregation_function=None):
    method __call__ (line 39) | def __call__(self, probabilities):

FILE: modelvshuman/datasets/experiments.py
  class Experiment (line 6) | class Experiment:
    method __post_init__ (line 14) | def __post_init__(self):
  class DatasetExperiments (line 76) | class DatasetExperiments:
  function get_experiments (line 81) | def get_experiments(dataset_names):

FILE: modelvshuman/datasets/imagenet.py
  class ImageNetParams (line 14) | class ImageNetParams:
  function imagenet_validation (line 25) | def imagenet_validation(*args, **kwargs):

FILE: modelvshuman/datasets/info_mappings.py
  class ImagePathToInformationMapping (line 4) | class ImagePathToInformationMapping(ABC):
    method __init__ (line 5) | def __init__(self):
    method __call__ (line 8) | def __call__(self, full_path):
  class ImageNetInfoMapping (line 12) | class ImageNetInfoMapping(ImagePathToInformationMapping):
    method __call__ (line 18) | def __call__(self, full_path):
  class ImageNetCInfoMapping (line 27) | class ImageNetCInfoMapping(ImagePathToInformationMapping):
    method __call__ (line 33) | def __call__(self, full_path):
  class InfoMappingWithSessions (line 44) | class InfoMappingWithSessions(ImagePathToInformationMapping):
    method __call__ (line 50) | def __call__(self, full_path):

FILE: modelvshuman/datasets/noise_generalisation.py
  class NoiseGeneralisationParams (line 21) | class NoiseGeneralisationParams:
  function _get_dataset (line 31) | def _get_dataset(name, params, *args, **kwargs):
  function colour (line 42) | def colour(*args, **kwargs):
  function contrast (line 49) | def contrast(*args, **kwargs):
  function high_pass (line 56) | def high_pass(*args, **kwargs):
  function low_pass (line 63) | def low_pass(*args, **kwargs):
  function phase_scrambling (line 70) | def phase_scrambling(*args, **kwargs):
  function power_equalisation (line 77) | def power_equalisation(*args, **kwargs):
  function false_colour (line 84) | def false_colour(*args, **kwargs):
  function rotation (line 91) | def rotation(*args, **kwargs):
  function eidolonI (line 98) | def eidolonI(*args, **kwargs):
  function eidolonII (line 105) | def eidolonII(*args, **kwargs):
  function eidolonIII (line 112) | def eidolonIII(*args, **kwargs):
  function uniform_noise (line 119) | def uniform_noise(*args, **kwargs):

FILE: modelvshuman/datasets/registry.py
  function register_dataset (line 6) | def register_dataset(name):
  function list_datasets (line 15) | def list_datasets():

FILE: modelvshuman/datasets/sketch.py
  function sketch (line 12) | def sketch(*args, **kwargs):

FILE: modelvshuman/datasets/stylized.py
  function stylized (line 12) | def stylized(*args, **kwargs):

FILE: modelvshuman/datasets/texture_shape.py
  class TextureShapeParams (line 17) | class TextureShapeParams:
  function _get_dataset (line 27) | def _get_dataset(name, *args, **kwargs):
  function original (line 37) | def original(*args, **kwargs):
  function greyscale (line 42) | def greyscale(*args, **kwargs):
  function texture (line 47) | def texture(*args, **kwargs):
  function edge (line 52) | def edge(*args, **kwargs):
  function silhouette (line 57) | def silhouette(*args, **kwargs):
  function cue_conflict (line 62) | def cue_conflict(*args, **kwargs):

FILE: modelvshuman/evaluation/evaluate.py
  function print_performance_to_csv (line 16) | def print_performance_to_csv(model_name, dataset_name,
  function print_predictions_to_console (line 55) | def print_predictions_to_console(softmax_output, top_n=5,
  class ResultPrinter (line 96) | class ResultPrinter():
    method __init__ (line 98) | def __init__(self, model_name, dataset,
    method create_session_csv (line 108) | def create_session_csv(self, session):
    method print_batch_to_csv (line 133) | def print_batch_to_csv(self, object_response,

FILE: modelvshuman/evaluation/metrics.py
  class Metric (line 15) | class Metric(ABC):
    method __init__ (line 16) | def __init__(self, name):
    method check_input (line 20) | def check_input(self, output, target, assert_ndarray=True):
    method update (line 29) | def update(self, predictions, targets, paths):
    method reset (line 33) | def reset(self):
    method value (line 38) | def value(self):
    method __str__ (line 41) | def __str__(self):
  class Accuracy (line 45) | class Accuracy(Metric):
    method __init__ (line 46) | def __init__(self, name=None, topk=1):
    method reset (line 52) | def reset(self):
    method update (line 56) | def update(self, predictions, targets, paths):
    method value (line 62) | def value(self):
    method __str__ (line 67) | def __str__(self):

FILE: modelvshuman/helper/human_categories.py
  function compute_imagenet_indices_for_category (line 16) | def compute_imagenet_indices_for_category(category):
  function get_human_object_recognition_categories (line 33) | def get_human_object_recognition_categories():
  function get_num_human_categories (line 45) | def get_num_human_categories():
  class HumanCategories (line 51) | class HumanCategories(object):
    method get_human_category_from_WNID (line 180) | def get_human_category_from_WNID(self, wnid):
    method get_imagenet_indices_for_category (line 198) | def get_imagenet_indices_for_category(self, category):

FILE: modelvshuman/helper/plotting_helper.py
  function get_short_imagename (line 7) | def get_short_imagename(imagename):
  function read_data (line 27) | def read_data(path):
  function read_all_csv_files_from_directory (line 35) | def read_all_csv_files_from_directory(dir_path):
  function get_experimental_data (line 49) | def get_experimental_data(dataset, print_name=False):
  function crop_pdfs_in_directory (line 72) | def crop_pdfs_in_directory(dir_path, suppress_output=True):

FILE: modelvshuman/helper/wordnet_functions.py
  function get_filenames_of_category (line 13) | def get_filenames_of_category(category, image_labels_path, categories):
  function hypernyms_in_ilsvrc2012_categories (line 44) | def hypernyms_in_ilsvrc2012_categories(entity):
  function get_hypernyms (line 57) | def get_hypernyms(categories_file, entity):
  function get_ilsvrc2012_training_WNID (line 78) | def get_ilsvrc2012_training_WNID(entity):
  function num_hypernyms_in_ilsvrc2012 (line 106) | def num_hypernyms_in_ilsvrc2012(entity):
  function get_ilsvrc2012_categories (line 112) | def get_ilsvrc2012_categories():
  function get_ilsvrc2012_WNIDs (line 128) | def get_ilsvrc2012_WNIDs():
  function get_category_from_line (line 144) | def get_category_from_line(line):
  function get_WNID_from_line (line 153) | def get_WNID_from_line(line):
  function get_WNID_from_index (line 160) | def get_WNID_from_index(index):

FILE: modelvshuman/model_evaluator.py
  function device (line 17) | def device():
  class ModelEvaluator (line 21) | class ModelEvaluator:
    method _pytorch_evaluator (line 23) | def _pytorch_evaluator(self, model_name, model, dataset, *args, **kwar...
    method _tensorflow_evaluator (line 61) | def _tensorflow_evaluator(self, model_name, model, dataset, *args, **k...
    method _get_datasets (line 95) | def _get_datasets(self, dataset_names, *args, **kwargs):
    method _to_tensorflow (line 102) | def _to_tensorflow(self, datasets):
    method _get_evaluator (line 110) | def _get_evaluator(self, framework):
    method _remove_model_from_cache (line 118) | def _remove_model_from_cache(self, framework, model_name):
    method __call__ (line 133) | def __call__(self, models, dataset_names, *args, **kwargs):

FILE: modelvshuman/models/pytorch/adversarially_robust/robust_models.py
  function _model (line 29) | def _model(arch, model_fn, pretrained, progress, use_data_parallel, **kw...
  function resnet50_l2_eps0 (line 44) | def resnet50_l2_eps0(pretrained=True, progress=True, use_data_parallel=F...
  function resnet50_l2_eps0_01 (line 51) | def resnet50_l2_eps0_01(pretrained=True, progress=True, use_data_paralle...
  function resnet50_l2_eps0_03 (line 58) | def resnet50_l2_eps0_03(pretrained=True, progress=True, use_data_paralle...
  function resnet50_l2_eps0_05 (line 65) | def resnet50_l2_eps0_05(pretrained=True, progress=True, use_data_paralle...
  function resnet50_l2_eps0_1 (line 72) | def resnet50_l2_eps0_1(pretrained=True, progress=True, use_data_parallel...
  function resnet50_l2_eps0_25 (line 79) | def resnet50_l2_eps0_25(pretrained=True, progress=True, use_data_paralle...
  function resnet50_l2_eps0_5 (line 86) | def resnet50_l2_eps0_5(pretrained=True, progress=True, use_data_parallel...
  function resnet50_l2_eps1 (line 93) | def resnet50_l2_eps1(pretrained=True, progress=True, use_data_parallel=F...
  function resnet50_l2_eps3 (line 100) | def resnet50_l2_eps3(pretrained=True, progress=True, use_data_parallel=F...
  function resnet50_l2_eps5 (line 107) | def resnet50_l2_eps5(pretrained=True, progress=True, use_data_parallel=F...

FILE: modelvshuman/models/pytorch/bagnets/kerasnet.py
  function bagnet9 (line 13) | def bagnet9():
  function bagnet17 (line 23) | def bagnet17():
  function bagnet33 (line 33) | def bagnet33():

FILE: modelvshuman/models/pytorch/bagnets/pytorchnet.py
  class Bottleneck (line 23) | class Bottleneck(nn.Module):
    method __init__ (line 26) | def __init__(self, inplanes, planes, stride=1, downsample=None, kernel...
    method forward (line 40) | def forward(self, x, **kwargs):
  class BagNet (line 67) | class BagNet(nn.Module):
    method __init__ (line 69) | def __init__(self, block, layers, strides=[1, 2, 2, 2], kernel3=[0, 0,...
    method _make_layer (line 95) | def _make_layer(self, block, planes, blocks, stride=1, kernel3=0, pref...
    method forward (line 114) | def forward(self, x):
  function bagnet33 (line 136) | def bagnet33(pretrained=False, strides=[2, 2, 2, 1], **kwargs):
  function bagnet17 (line 148) | def bagnet17(pretrained=False, strides=[2, 2, 2, 1], **kwargs):
  function bagnet9 (line 160) | def bagnet9(pretrained=False, strides=[2, 2, 2, 1], **kwargs):

FILE: modelvshuman/models/pytorch/model_zoo.py
  function model_pytorch (line 13) | def model_pytorch(model_name, *args):
  function resnet50_trained_on_SIN (line 21) | def resnet50_trained_on_SIN(model_name, *args):
  function resnet50_trained_on_SIN_and_IN (line 28) | def resnet50_trained_on_SIN_and_IN(model_name, *args):
  function resnet50_trained_on_SIN_and_IN_then_finetuned_on_IN (line 35) | def resnet50_trained_on_SIN_and_IN_then_finetuned_on_IN(model_name, *args):
  function bagnet9 (line 42) | def bagnet9(model_name, *args):
  function bagnet17 (line 50) | def bagnet17(model_name, *args):
  function bagnet33 (line 58) | def bagnet33(model_name, *args):
  function simclr_resnet50x1_supervised_baseline (line 66) | def simclr_resnet50x1_supervised_baseline(model_name, *args):
  function simclr_resnet50x4_supervised_baseline (line 73) | def simclr_resnet50x4_supervised_baseline(model_name, *args):
  function simclr_resnet50x1 (line 80) | def simclr_resnet50x1(model_name, *args):
  function simclr_resnet50x2 (line 87) | def simclr_resnet50x2(model_name, *args):
  function simclr_resnet50x4 (line 94) | def simclr_resnet50x4(model_name, *args):
  function InsDis (line 102) | def InsDis(model_name, *args):
  function MoCo (line 109) | def MoCo(model_name, *args):
  function MoCoV2 (line 116) | def MoCoV2(model_name, *args):
  function PIRL (line 123) | def PIRL(model_name, *args):
  function InfoMin (line 130) | def InfoMin(model_name, *args):
  function resnet50_l2_eps0 (line 137) | def resnet50_l2_eps0(model_name, *args):
  function resnet50_l2_eps0_01 (line 144) | def resnet50_l2_eps0_01(model_name, *args):
  function resnet50_l2_eps0_03 (line 151) | def resnet50_l2_eps0_03(model_name, *args):
  function resnet50_l2_eps0_05 (line 158) | def resnet50_l2_eps0_05(model_name, *args):
  function resnet50_l2_eps0_1 (line 165) | def resnet50_l2_eps0_1(model_name, *args):
  function resnet50_l2_eps0_25 (line 172) | def resnet50_l2_eps0_25(model_name, *args):
  function resnet50_l2_eps0_5 (line 179) | def resnet50_l2_eps0_5(model_name, *args):
  function resnet50_l2_eps1 (line 186) | def resnet50_l2_eps1(model_name, *args):
  function resnet50_l2_eps3 (line 193) | def resnet50_l2_eps3(model_name, *args):
  function resnet50_l2_eps5 (line 200) | def resnet50_l2_eps5(model_name, *args):
  function efficientnet_b0 (line 207) | def efficientnet_b0(model_name, *args):
  function efficientnet_es (line 215) | def efficientnet_es(model_name, *args):
  function efficientnet_b0_noisy_student (line 223) | def efficientnet_b0_noisy_student(model_name, *args):
  function efficientnet_l2_noisy_student_475 (line 231) | def efficientnet_l2_noisy_student_475(model_name, *args):
  function transformer_B16_IN21K (line 239) | def transformer_B16_IN21K(model_name, *args):
  function transformer_B32_IN21K (line 246) | def transformer_B32_IN21K(model_name, *args):
  function transformer_L16_IN21K (line 253) | def transformer_L16_IN21K(model_name, *args):
  function transformer_L32_IN21K (line 260) | def transformer_L32_IN21K(model_name, *args):
  function vit_small_patch16_224 (line 267) | def vit_small_patch16_224(model_name, *args):
  function vit_base_patch16_224 (line 276) | def vit_base_patch16_224(model_name, *args):
  function vit_large_patch16_224 (line 285) | def vit_large_patch16_224(model_name, *args):
  function cspresnet50 (line 294) | def cspresnet50(model_name, *args):
  function cspresnext50 (line 302) | def cspresnext50(model_name, *args):
  function cspdarknet53 (line 310) | def cspdarknet53(model_name, *args):
  function darknet53 (line 318) | def darknet53(model_name, *args):
  function dpn68 (line 326) | def dpn68(model_name, *args):
  function dpn68b (line 334) | def dpn68b(model_name, *args):
  function dpn92 (line 342) | def dpn92(model_name, *args):
  function dpn98 (line 350) | def dpn98(model_name, *args):
  function dpn131 (line 358) | def dpn131(model_name, *args):
  function dpn107 (line 366) | def dpn107(model_name, *args):
  function hrnet_w18_small (line 374) | def hrnet_w18_small(model_name, *args):
  function hrnet_w18_small (line 382) | def hrnet_w18_small(model_name, *args):
  function hrnet_w18_small_v2 (line 390) | def hrnet_w18_small_v2(model_name, *args):
  function hrnet_w18 (line 398) | def hrnet_w18(model_name, *args):
  function hrnet_w30 (line 406) | def hrnet_w30(model_name, *args):
  function hrnet_w40 (line 414) | def hrnet_w40(model_name, *args):
  function hrnet_w44 (line 422) | def hrnet_w44(model_name, *args):
  function hrnet_w48 (line 430) | def hrnet_w48(model_name, *args):
  function hrnet_w64 (line 438) | def hrnet_w64(model_name, *args):
  function selecsls42 (line 446) | def selecsls42(model_name, *args):
  function selecsls84 (line 454) | def selecsls84(model_name, *args):
  function selecsls42b (line 462) | def selecsls42b(model_name, *args):
  function selecsls60 (line 470) | def selecsls60(model_name, *args):
  function selecsls60b (line 478) | def selecsls60b(model_name, *args):
  function clip (line 486) | def clip(model_name, *args):
  function clipRN50 (line 493) | def clipRN50(model_name, *args):
  function resnet50_swsl (line 500) | def resnet50_swsl(model_name, *args):
  function ResNeXt101_32x16d_swsl (line 507) | def ResNeXt101_32x16d_swsl(model_name, *args):
  function BiTM_resnetv2_50x1 (line 514) | def BiTM_resnetv2_50x1(model_name, *args):
  function BiTM_resnetv2_50x3 (line 522) | def BiTM_resnetv2_50x3(model_name, *args):
  function BiTM_resnetv2_101x1 (line 530) | def BiTM_resnetv2_101x1(model_name, *args):
  function BiTM_resnetv2_101x3 (line 538) | def BiTM_resnetv2_101x3(model_name, *args):
  function BiTM_resnetv2_152x2 (line 546) | def BiTM_resnetv2_152x2(model_name, *args):
  function BiTM_resnetv2_152x4 (line 554) | def BiTM_resnetv2_152x4(model_name, *args):
  function resnet50_clip_hard_labels (line 562) | def resnet50_clip_hard_labels(model_name, *args):
  function resnet50_clip_soft_labels (line 573) | def resnet50_clip_soft_labels(model_name, *args):
  function swag_regnety_16gf_in1k (line 584) | def swag_regnety_16gf_in1k(model_name, *args):
  function swag_regnety_32gf_in1k (line 590) | def swag_regnety_32gf_in1k(model_name, *args):
  function swag_regnety_128gf_in1k (line 596) | def swag_regnety_128gf_in1k(model_name, *args):
  function swag_vit_b16_in1k (line 602) | def swag_vit_b16_in1k(model_name, *args):
  function swag_vit_l16_in1k (line 608) | def swag_vit_l16_in1k(model_name, *args):
  function swag_vit_h14_in1k (line 614) | def swag_vit_h14_in1k(model_name, *args):

FILE: modelvshuman/models/pytorch/pycontrast/pycontrast_resnet50.py
  function build_classifier (line 19) | def build_classifier(model_name, classes=1000):
  function InsDis (line 38) | def InsDis(pretrained=False, **kwargs):
  function CMC (line 54) | def CMC(pretrained=False, **kwargs):
  function MoCo (line 69) | def MoCo(pretrained=False, **kwargs):
  function MoCoV2 (line 84) | def MoCoV2(pretrained=False, **kwargs):
  function PIRL (line 99) | def PIRL(pretrained=False, **kwargs):
  function InfoMin (line 114) | def InfoMin(pretrained=False, **kwargs):

FILE: modelvshuman/models/pytorch/shapenet/texture_shape_models.py
  function load_model (line 24) | def load_model(model_name):

FILE: modelvshuman/models/pytorch/simclr/cores/cores.py
  class Core (line 11) | class Core:
    method initialize (line 12) | def initialize(self):
    method __repr__ (line 15) | def __repr__(self):
  class Core2d (line 24) | class Core2d(Core):
    method initialize (line 25) | def initialize(self, cuda=False):
    method put_to_cuda (line 29) | def put_to_cuda(self, cuda):
    method init_conv (line 34) | def init_conv(m):
  class TaskDrivenCore (line 41) | class TaskDrivenCore(Core2d, nn.Module):
    method __init__ (line 42) | def __init__(
    method forward (line 106) | def forward(self, input_):
    method regularizer (line 113) | def regularizer(self):
    method outchannels (line 118) | def outchannels(self):
    method initialize (line 128) | def initialize(self, cuda=False):
  class TaskDrivenCore2 (line 134) | class TaskDrivenCore2(Core2d, nn.Module):
    method __init__ (line 135) | def __init__(
    method forward (line 224) | def forward(self, input_):
    method regularizer (line 235) | def regularizer(self):
    method probe_model (line 239) | def probe_model(self):
    method outchannels (line 254) | def outchannels(self):
    method initialize (line 268) | def initialize(self, cuda=False):

FILE: modelvshuman/models/pytorch/simclr/utils/gdrive.py
  function _get_name (line 15) | def _get_name(id):
  function load_state_dict_from_google_drive (line 53) | def load_state_dict_from_google_drive(id, model_dir=None, map_location=N...

FILE: modelvshuman/models/pytorch/simclr/utils/mlayer.py
  function clip_model (line 7) | def clip_model(model, layer_name):
  function probe_model (line 48) | def probe_model(model, layer_name):
  class ModuleHook (line 64) | class ModuleHook():
    method __init__ (line 65) | def __init__(self, module):
    method hook_fn (line 70) | def hook_fn(self, module, input, output):
    method close (line 74) | def close(self):
  function hook_model (line 79) | def hook_model(model):
  function hook_model_module (line 100) | def hook_model_module(model, module):

FILE: modelvshuman/models/pytorch/simclr/utils/modules.py
  class Unnormalize (line 5) | class Unnormalize(nn.Module):
    method __init__ (line 9) | def __init__(self, mean=[0], std=[1], inplace=False):
    method forward (line 15) | def forward(self, x):
  function unnormalize (line 21) | def unnormalize(tensor, mean=[0], std=[1], inplace=False):

FILE: modelvshuman/models/pytorch/simclr/zoo/simclr.py
  function _model (line 45) | def _model(arch, pretrained, block, layers, width_mult, normalized_input...
  function simclr_resnet50x1_supervised_baseline (line 68) | def simclr_resnet50x1_supervised_baseline(pretrained=False, normalized_i...
  function simclr_resnet50x4_supervised_baseline (line 88) | def simclr_resnet50x4_supervised_baseline(pretrained=False, normalized_i...
  function simclr_resnet50x1 (line 108) | def simclr_resnet50x1(pretrained=False, normalized_inputs=True, use_data...
  function simclr_resnet50x2 (line 128) | def simclr_resnet50x2(pretrained=False, normalized_inputs=True, use_data...
  function simclr_resnet50x4 (line 148) | def simclr_resnet50x4(pretrained=False, normalized_inputs=True, use_data...
  function conv3x3 (line 169) | def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
  function conv1x1 (line 175) | def conv1x1(in_planes, out_planes, stride=1):
  class BasicBlock (line 180) | class BasicBlock(nn.Module):
    method __init__ (line 184) | def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
    method forward (line 202) | def forward(self, x):
  class Bottleneck (line 221) | class Bottleneck(nn.Module):
    method __init__ (line 225) | def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
    method forward (line 242) | def forward(self, x):
  class ResNet (line 265) | class ResNet(nn.Module):
    method __init__ (line 267) | def __init__(self, block, layers, num_classes=1000, zero_init_residual...
    method _make_layer (line 323) | def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
    method _forward_impl (line 347) | def _forward_impl(self, x):
    method forward (line 369) | def forward(self, x):

FILE: modelvshuman/models/registry.py
  function register_model (line 8) | def register_model(framework):
  function list_models (line 17) | def list_models(framework):

FILE: modelvshuman/models/tensorflow/build_model.py
  function build_model_from_hub (line 6) | def build_model_from_hub(model_name):

FILE: modelvshuman/models/tensorflow/model_zoo.py
  function efficientnet_b0 (line 8) | def efficientnet_b0(model_name, *args):
  function resnet50 (line 14) | def resnet50(model_name, *args):
  function mobilenet_v1 (line 20) | def mobilenet_v1(model_name, *args):
  function inception_v1 (line 26) | def inception_v1(model_name, *args):

FILE: modelvshuman/models/wrappers/base.py
  class AbstractModel (line 3) | class AbstractModel(ABC):
    method softmax (line 6) | def softmax(self, logits):
    method forward_batch (line 10) | def forward_batch(self, images):

FILE: modelvshuman/models/wrappers/pytorch.py
  function device (line 15) | def device():
  function undo_default_preprocessing (line 19) | def undo_default_preprocessing(images):
  class PytorchModel (line 32) | class PytorchModel(AbstractModel):
    method __init__ (line 34) | def __init__(self, model, model_name, *args):
    method to_numpy (line 40) | def to_numpy(self, x):
    method softmax (line 46) | def softmax(self, logits):
    method forward_batch (line 53) | def forward_batch(self, images):
  class PyContrastPytorchModel (line 61) | class PyContrastPytorchModel(PytorchModel):
    method __init__ (line 67) | def __init__(self, model, classifier, model_name, *args):
    method forward_batch (line 72) | def forward_batch(self, images):
  class ViTPytorchModel (line 81) | class ViTPytorchModel(PytorchModel):
    method __init__ (line 83) | def __init__(self, model, model_name, img_size=(384, 384), *args):
    method forward_batch (line 87) | def forward_batch(self, images):
    method preprocess (line 98) | def preprocess(self):
  class ClipPytorchModel (line 109) | class ClipPytorchModel(PytorchModel):
    method __init__ (line 111) | def __init__(self, model, model_name, *args):
    method _get_zeroshot_weights (line 115) | def _get_zeroshot_weights(self, class_names, templates):
    method preprocess (line 130) | def preprocess(self):
    method forward_batch (line 140) | def forward_batch(self, images):
  class EfficientNetPytorchModel (line 155) | class EfficientNetPytorchModel(PytorchModel):
    method __init__ (line 157) | def __init__(self, model, model_name, *args):
    method preprocess (line 160) | def preprocess(self):
    method forward_batch (line 173) | def forward_batch(self, images):
  class SwagPytorchModel (line 185) | class SwagPytorchModel(PytorchModel):
    method __init__ (line 187) | def __init__(self, model, model_name, input_size, *args):
    method preprocess (line 191) | def preprocess(self):
    method forward_batch (line 202) | def forward_batch(self, images):

FILE: modelvshuman/models/wrappers/tensorflow.py
  function get_device (line 10) | def get_device(device=None):
  class TensorflowModel (line 20) | class TensorflowModel(AbstractModel):
    method __init__ (line 22) | def __init__(self, model, model_name, *args):
    method softmax (line 27) | def softmax(self, logits):
    method forward_batch (line 31) | def forward_batch(self, images):

FILE: modelvshuman/plotting/analyses.py
  class Analysis (line 16) | class Analysis(ABC):
    method __init__ (line 19) | def __init__(self, *args, **kwargs):
    method _check_dataframe (line 23) | def _check_dataframe(df):
    method analysis (line 27) | def analysis(self, *args, **kwargs):
    method get_result_df (line 31) | def get_result_df(self, *args, **kwars):
    method num_input_models (line 36) | def num_input_models(self):
  class ConfusionAnalysis (line 45) | class ConfusionAnalysis(Analysis):
    method __init__ (line 49) | def __init__(self):
    method analysis (line 54) | def analysis(self, df,
    method get_result_df (line 73) | def get_result_df():
  class ShapeBias (line 78) | class ShapeBias(Analysis):
    method __init__ (line 84) | def __init__(self):
    method analysis (line 89) | def analysis(self, df):
    method get_result_df (line 109) | def get_result_df(self):
    method get_texture_category (line 113) | def get_texture_category(self, imagename):
  class ErrorConsistency (line 131) | class ErrorConsistency(Analysis):
    method __init__ (line 138) | def __init__(self):
    method error_consistency (line 147) | def error_consistency(self, expected_consistency, observed_consistency):
    method analysis (line 161) | def analysis(self, df1, df2):
    method get_result_df (line 194) | def get_result_df(self, df, decision_makers, experiment, column="error...
  class XYAnalysis (line 242) | class XYAnalysis(Analysis):
    method get_result_df (line 244) | def get_result_df(self, df, decision_makers,
  class SixteenClassAccuracy (line 265) | class SixteenClassAccuracy(XYAnalysis):
    method __init__ (line 273) | def __init__(self):
    method analysis (line 281) | def analysis(self, df):
  class SixteenClassAccuracyDifference (line 290) | class SixteenClassAccuracyDifference(XYAnalysis):
    method __init__ (line 296) | def __init__(self):
    method analysis (line 303) | def analysis(self, df1, df2, norm=np.square):
  class Entropy (line 315) | class Entropy(XYAnalysis):
    method __init__ (line 322) | def __init__(self, num_categories=16):
    method analysis (line 330) | def analysis(self, df):
  function get_analysis_list (line 351) | def get_analysis_list(df, conditions, analysis):
  function get_percent_answers_per_category (line 365) | def get_percent_answers_per_category(df):

FILE: modelvshuman/plotting/colors.py
  function rgb (line 6) | def rgb(r, g, b, divide_by=255.0):

FILE: modelvshuman/plotting/decision_makers.py
  class DecisionMaker (line 16) | class DecisionMaker:
    method __post_init__ (line 24) | def __post_init__(self):
    method _get_ID (line 61) | def _get_ID(self):
    method _convert_file_name (line 67) | def _convert_file_name(self, plotting_name):
  function get_individual_decision_makers (line 75) | def get_individual_decision_makers(decision_maker_list):
  function get_human_and_model_decision_makers (line 85) | def get_human_and_model_decision_makers(decision_maker_list):
  function decision_maker_to_attributes (line 98) | def decision_maker_to_attributes(decision_maker_name, decision_maker_list):

FILE: modelvshuman/plotting/plot.py
  function plot (line 76) | def plot(plot_types,
  function plot_nonparallel (line 100) | def plot_nonparallel(plot_types,
  function get_datasets (line 192) | def get_datasets(dataset_names, *args, **kwargs):
  function get_dataset_names (line 200) | def get_dataset_names(plot_type):
  function get_permutations (line 221) | def get_permutations(elements):
  function exclude_conditions (line 236) | def exclude_conditions(dataset):
  function log (line 250) | def log(plot_type, dataset_name):
  function get_human_and_CNN_subjects (line 258) | def get_human_and_CNN_subjects(subjects):
  function get_raw_matrix (line 272) | def get_raw_matrix(dataset,
  function plotting_names_to_data_subjects (line 316) | def plotting_names_to_data_subjects(plotting_names,
  function get_mean_over_datasets (line 332) | def get_mean_over_datasets(colname,
  function x_y_plot (line 376) | def x_y_plot(figure_path,
  function confusion_matrix_helper (line 418) | def confusion_matrix_helper(data, output_filename,
  function plot_shape_bias_matrixplot (line 461) | def plot_shape_bias_matrixplot(datasets,
  function plot_shape_bias_boxplot (line 575) | def plot_shape_bias_boxplot(datasets,
  function plot_error_consistency (line 637) | def plot_error_consistency(datasets, decision_maker_fun, result_dir,
  function plot_matrix (line 644) | def plot_matrix(datasets, analysis,
  function sort_matrix_by_models_mean (line 695) | def sort_matrix_by_models_mean(result_dict):
  function sort_matrix_by_subjects_mean (line 724) | def sort_matrix_by_subjects_mean(result_dict):
  function plot_confusion_matrix (line 752) | def plot_confusion_matrix(datasets,
  function plot_accuracy (line 777) | def plot_accuracy(datasets, decision_maker_fun, result_dir):
  function plot_entropy (line 783) | def plot_entropy(datasets, decision_maker_fun, result_dir):
  function plot_error_consistency_lineplot (line 789) | def plot_error_consistency_lineplot(datasets, decision_maker_fun, result...
  function plot_general_analyses (line 795) | def plot_general_analyses(datasets, analysis, decision_maker_fun,
  function get_raw_benchmark_df (line 817) | def get_raw_benchmark_df(datasets, metric_names, decision_maker_fun,
  function print_benchmark_table_accuracy_to_latex (line 909) | def print_benchmark_table_accuracy_to_latex(df):
  function print_benchmark_table_humanlike_to_latex (line 938) | def print_benchmark_table_humanlike_to_latex(df):
  function plot_benchmark_barplot (line 976) | def plot_benchmark_barplot(datasets, decision_maker_fun, result_dir,
  function format_benchmark_df (line 1036) | def format_benchmark_df(df, decision_makers, metric_names,
  function barplot (line 1078) | def barplot(path, names, values, colors, ylabel=None,
  function plot_scatterplot (line 1114) | def plot_scatterplot(datasets,
  function scatter_plot_helper (line 1171) | def scatter_plot_helper(df, metric_x, metric_y, result_dir, dataset_name):

FILE: modelvshuman/utils.py
  function try_download_dataset_from_github (line 19) | def try_download_dataset_from_github(dataset_name):
  function load_dataset (line 41) | def load_dataset(name, *args, **kwargs):
  function no_op (line 59) | def no_op():
  function load_model (line 64) | def load_model(model_name, *args):
  class AverageMeter (line 79) | class AverageMeter(object):
    method __init__ (line 82) | def __init__(self, name, fmt=':f'):
    method reset (line 87) | def reset(self):
    method update (line 93) | def update(self, val, n=1):
    method __str__ (line 99) | def __str__(self):
Condensed preview — 178 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (8,762K chars).
[
  {
    "path": ".gitignore",
    "chars": 5744,
    "preview": "datasets/*\n\nmodelvshuman/.idea/\n\n# matlab\n*pathdef.m\n\n# tensorflow\n*.h5\n\n*.pickle\n*.csv#\n*.ods#\n*ckpt*\n*.pb\n*.pbtxt\n*ckp"
  },
  {
    "path": "README.md",
    "chars": 12871,
    "preview": "![header](./assets/header/header.png \"header\")\n\n<p align=\"center\">\n  <a href=\"#trophy-benchmark\">Benchmark</a> •\n  <a hr"
  },
  {
    "path": "examples/evaluate.py",
    "chars": 1162,
    "preview": "import warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\nfrom modelvshuman import Plot, Evaluate\n"
  },
  {
    "path": "examples/plotting_definition.py",
    "chars": 7452,
    "preview": "# /!usr/bin/env python3\n\n\"\"\"\nDefine decision makers (either human participants or CNN models).\n\"\"\"\n\nfrom modelvshuman im"
  },
  {
    "path": "latex-report/assets/benchmark_figures.tex",
    "chars": 1277,
    "preview": "\\begin{figure}[h]\n\t\\begin{subfigure}{0.49\\linewidth}\n\t\t\\centering\n\t\t\\includegraphics[width=\\linewidth]{benchmark_16-clas"
  },
  {
    "path": "latex-report/assets/benchmark_table_accuracy.tex",
    "chars": 302,
    "preview": "\\begin{tabular}{lrr}\n\\toprule\n    model & OOD accuracy $\\uparrow$ & rank $\\downarrow$ \\\\\n\\midrule\nSimCLR-x1 &          \\"
  },
  {
    "path": "latex-report/assets/benchmark_table_humanlike.tex",
    "chars": 584,
    "preview": "\\begin{tabular}{lrrrr}\n\\toprule\n    model & accuracy diff. $\\downarrow$ & obs. consistency $\\uparrow$ & error consistenc"
  },
  {
    "path": "latex-report/assets/benchmark_tables.tex",
    "chars": 697,
    "preview": "\\begin{table}[ht]\n\t\\caption{Benchmark table of model results for most human-like behaviour. The three metrics ``accuracy"
  },
  {
    "path": "latex-report/assets/consistency_vs_accuracy.tex",
    "chars": 988,
    "preview": "\\begin{figure}[h]\n    \\centering\n    \\begin{subfigure}{0.45\\textwidth}\n        \\centering\n        \\includegraphics[width"
  },
  {
    "path": "latex-report/assets/error_consistency_lineplots.tex",
    "chars": 5408,
    "preview": "\\begin{figure}\n\t%\t% colour vs. greyscale | true vs. false colour\n\t\\begin{subfigure}{\\figwidth}\n\t\t\t\\centering\n\t\t\t\\textbf{"
  },
  {
    "path": "latex-report/assets/error_consistency_matrices.tex",
    "chars": 1104,
    "preview": "\\begin{figure}\n\t\\centering\n\t\\includegraphics[width=0.8\\linewidth]{sketch_error-consistency_matrix.pdf}\n\t\\caption{Error c"
  },
  {
    "path": "latex-report/assets/noise_generalisation.tex",
    "chars": 5252,
    "preview": "\\begin{figure}\n\t%\t% colour vs. greyscale | true vs. false colour\n\t\\begin{subfigure}{\\figwidth}\n\t\t\t\\centering\n\t\t\t\\textbf{"
  },
  {
    "path": "latex-report/assets/nonparametric_accuracy.tex",
    "chars": 1151,
    "preview": "\\begin{figure}[h]\n\t\\begin{subfigure}{0.49\\linewidth}\n\t\t\\centering\n\t\t\\includegraphics[width=\\linewidth]{sketch\\_OOD-accur"
  },
  {
    "path": "latex-report/assets/shape_bias.tex",
    "chars": 327,
    "preview": "\\begin{figure}[h]\n\t\\includegraphics[width=\\linewidth]{cue-conflict_shape-bias_matrixplot.pdf}\n\t\\caption{Shape vs.\\ textu"
  },
  {
    "path": "latex-report/neurips.sty",
    "chars": 11146,
    "preview": "% partial rewrite of the LaTeX2e package for submissions to the\n% Conference on Neural Information Processing Systems (N"
  },
  {
    "path": "latex-report/report.tex",
    "chars": 1428,
    "preview": "\\documentclass[]{article}\r\n\r\n\\usepackage[nonatbib,preprint]{neurips}\r\n\r\n\\usepackage[utf8]{inputenc} % allow utf-8 input\r"
  },
  {
    "path": "licenses/CODE_LICENSE",
    "chars": 1071,
    "preview": "MIT License\n\nCopyright (c) Robert Geirhos 2021\n\nPermission is hereby granted, free of charge, to any person obtaining a "
  },
  {
    "path": "licenses/LICENSES_OVERVIEW.md",
    "chars": 1776,
    "preview": "# Overview over licenses\n\n## Dataset licenses\n\n### ImageNet-based datasets\n\ncolour, contrast, eidolonI, eidolonII, eidol"
  },
  {
    "path": "licenses/MODEL_LICENSES",
    "chars": 62396,
    "preview": "-----------------LICENSE for TensorFlow Hub models ---------------\nTensorFlow Hub models were obtained from https://gith"
  },
  {
    "path": "modelvshuman/__init__.py",
    "chars": 267,
    "preview": "from . import cli\nfrom . import datasets\nfrom . import evaluation\nfrom . import models\nfrom . import plotting\nfrom .mode"
  },
  {
    "path": "modelvshuman/cli.py",
    "chars": 1599,
    "preview": "#!/usr/bin/env python3\n\nimport logging\n\nimport click\n\nfrom .datasets import list_datasets\nfrom .models import list_model"
  },
  {
    "path": "modelvshuman/constants.py",
    "chars": 5001,
    "preview": "#!/usr/bin/env python\n\nimport os\nfrom os.path import join as pjoin\n\n####################################################"
  },
  {
    "path": "modelvshuman/datasets/__init__.py",
    "chars": 338,
    "preview": "from .imagenet import imagenet_validation\nfrom .sketch import sketch\nfrom .stylized import stylized\nfrom .texture_shape "
  },
  {
    "path": "modelvshuman/datasets/base.py",
    "chars": 2260,
    "preview": "#!/usr/bin/env python3\n\nimport os\nfrom os.path import join as pjoin\n\n\nclass Dataset(object):\n    \"\"\"Base Dataset class\n\n"
  },
  {
    "path": "modelvshuman/datasets/create_dataset.py",
    "chars": 7070,
    "preview": "#!/usr/bin/env python3\n\n\"\"\"\nCreate dataset and experiments.\nA dataset is a directory with subdirectories, one subdir per"
  },
  {
    "path": "modelvshuman/datasets/dataloaders.py",
    "chars": 2419,
    "preview": "import torch\nfrom torchvision import transforms\nimport torchvision.datasets as datasets\nfrom . import info_mappings\n\n\ncl"
  },
  {
    "path": "modelvshuman/datasets/dataset_converters.py",
    "chars": 879,
    "preview": "import tensorflow as tf\nimport torch\nimport numpy as np\n\n\nclass ToTensorflow(object):\n    \"\"\"This will actually convert "
  },
  {
    "path": "modelvshuman/datasets/decision_mappings.py",
    "chars": 1963,
    "preview": "#!/usr/bin/env python\n\nimport numpy as np\nfrom abc import ABC, abstractmethod\n\nfrom ..helper import human_categories as "
  },
  {
    "path": "modelvshuman/datasets/experiments.py",
    "chars": 4559,
    "preview": "from dataclasses import dataclass, field\nfrom typing import List\n\n\n@dataclass\nclass Experiment:\n    \"\"\"\n    Experiment p"
  },
  {
    "path": "modelvshuman/datasets/imagenet.py",
    "chars": 1119,
    "preview": "from dataclasses import dataclass, field\nfrom os.path import join as pjoin\nfrom typing import List\n\nfrom . import decisi"
  },
  {
    "path": "modelvshuman/datasets/info_mappings.py",
    "chars": 1621,
    "preview": "from abc import ABC\n\n\nclass ImagePathToInformationMapping(ABC):\n    def __init__(self):\n        pass\n\n    def __call__(s"
  },
  {
    "path": "modelvshuman/datasets/noise_generalisation.py",
    "chars": 4321,
    "preview": "from dataclasses import dataclass, field\nfrom os.path import join as pjoin\nfrom typing import List\n\nfrom .registry impor"
  },
  {
    "path": "modelvshuman/datasets/registry.py",
    "chars": 462,
    "preview": "from collections import defaultdict\n\n_dataset_registry = {}  # mapping of dataset names to entrypoint fns\n\n\ndef register"
  },
  {
    "path": "modelvshuman/datasets/sketch.py",
    "chars": 786,
    "preview": "from os.path import join as pjoin\n\nfrom .base import Dataset\nfrom .dataloaders import PytorchLoader\nfrom .imagenet impor"
  },
  {
    "path": "modelvshuman/datasets/stylized.py",
    "chars": 794,
    "preview": "from os.path import join as pjoin\n\nfrom .base import Dataset\nfrom .dataloaders import PytorchLoader\nfrom .imagenet impor"
  },
  {
    "path": "modelvshuman/datasets/texture_shape.py",
    "chars": 1829,
    "preview": "from dataclasses import dataclass, field\nfrom os.path import join as pjoin\nfrom typing import List\n\nfrom . import decisi"
  },
  {
    "path": "modelvshuman/evaluation/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "modelvshuman/evaluation/evaluate.py",
    "chars": 5759,
    "preview": "\"\"\"\nGeneric evaluation functionality: evaluate on several datasets.\n\"\"\"\n\nimport csv\nimport os\nimport shutil\nimport numpy"
  },
  {
    "path": "modelvshuman/evaluation/imagenet_labels.txt",
    "chars": 25581,
    "preview": "0:background\n1:tench, Tinca tinca\n2:goldfish, Carassius auratus\n3:great white shark, white shark, man-eater, man-eating "
  },
  {
    "path": "modelvshuman/evaluation/metrics.py",
    "chars": 1738,
    "preview": "\"\"\"\nGeneric evaluation functionality: evaluate on several datasets.\n\"\"\"\nfrom abc import ABC, abstractmethod\n\nfrom .. imp"
  },
  {
    "path": "modelvshuman/helper/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "modelvshuman/helper/categories.txt",
    "chars": 31675,
    "preview": "n01440764 tench, Tinca tinca\nn01443537 goldfish, Carassius auratus\nn01484850 great white shark, white shark, man-eater, "
  },
  {
    "path": "modelvshuman/helper/human_categories.py",
    "chars": 8654,
    "preview": "#!/usr/bin/env python\n\"\"\"human_categories.py\n\nCode to define the class that deals with the specifics\nof the 16 categorie"
  },
  {
    "path": "modelvshuman/helper/plotting_helper.py",
    "chars": 2720,
    "preview": "import pandas as pd\nimport os\n\nfrom .. import constants as c\n\n\ndef get_short_imagename(imagename):\n    \"\"\"Return image-s"
  },
  {
    "path": "modelvshuman/helper/wordnet_functions.py",
    "chars": 4859,
    "preview": "from os.path import join as pjoin\nimport numpy as np\nfrom shutil import copyfile\nimport os\nimport linecache as lc\n\nfrom "
  },
  {
    "path": "modelvshuman/model_evaluator.py",
    "chars": 6420,
    "preview": "import copy\nimport datetime\nimport logging\nimport os\n\nimport torch\nfrom tqdm import tqdm\n\nfrom .datasets import ToTensor"
  },
  {
    "path": "modelvshuman/models/__init__.py",
    "chars": 145,
    "preview": "from .pytorch import model_zoo as pytorch_model_zoo\nfrom .tensorflow import model_zoo as tensorflow_model_zoo\nfrom .regi"
  },
  {
    "path": "modelvshuman/models/pytorch/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "modelvshuman/models/pytorch/adversarially_robust/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "modelvshuman/models/pytorch/adversarially_robust/robust_models.py",
    "chars": 6068,
    "preview": "# Adapted from https://github.com/microsoft/robust-models-transfer by Santiago Cadena\n#     @InProceedings{salman2020adv"
  },
  {
    "path": "modelvshuman/models/pytorch/bagnets/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "modelvshuman/models/pytorch/bagnets/kerasnet.py",
    "chars": 1338,
    "preview": "import keras\nfrom keras.models import load_model\n\n__all__ = ['bagnet9', 'bagnet17', 'bagnet33']\n\nmodel_urls = {\n    'bag"
  },
  {
    "path": "modelvshuman/models/pytorch/bagnets/pytorchnet.py",
    "chars": 6304,
    "preview": "\"\"\"\nCode from\nhttps://github.com/wielandbrendel/bag-of-local-features-models\nAccessed 02.03.2020\n\"\"\"\nimport math\nimport "
  },
  {
    "path": "modelvshuman/models/pytorch/clip/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "modelvshuman/models/pytorch/clip/imagenet_classes.py",
    "chars": 14833,
    "preview": "imagenet_classes = [\"tench\", \"goldfish\", \"great white shark\", \"tiger shark\", \"hammerhead shark\", \"electric ray\", \"stingr"
  },
  {
    "path": "modelvshuman/models/pytorch/clip/imagenet_templates.py",
    "chars": 2332,
    "preview": "imagenet_templates = [\n    'a bad photo of a {}.',\n    'a photo of many {}.',\n    'a sculpture of a {}.',\n    'a photo o"
  },
  {
    "path": "modelvshuman/models/pytorch/model_zoo.py",
    "chars": 20369,
    "preview": "#!/usr/bin/env python3\nimport torch\n\nfrom ..registry import register_model\nfrom ..wrappers.pytorch import PytorchModel, "
  },
  {
    "path": "modelvshuman/models/pytorch/pycontrast/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "modelvshuman/models/pytorch/pycontrast/pycontrast_resnet50.py",
    "chars": 3554,
    "preview": "from collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\n\nCLASSIFIER_WEIGHTS = {\n    \"InsDis\": \"https://g"
  },
  {
    "path": "modelvshuman/models/pytorch/shapenet/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "modelvshuman/models/pytorch/shapenet/texture_shape_models.py",
    "chars": 2701,
    "preview": "\"\"\"\nRead PyTorch model from .pth.tar checkpoint.\nCode from:\nhttps://github.com/rgeirhos/texture-vs-shape/blob/master/mod"
  },
  {
    "path": "modelvshuman/models/pytorch/simclr/__init__.py",
    "chars": 52,
    "preview": "from torchvision.models import *\nfrom .zoo import *\n"
  },
  {
    "path": "modelvshuman/models/pytorch/simclr/cores/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "modelvshuman/models/pytorch/simclr/cores/cores.py",
    "chars": 10006,
    "preview": "from collections import OrderedDict, Iterable\nimport warnings\nimport torch\nfrom torch import nn\nfrom torch.nn import fun"
  },
  {
    "path": "modelvshuman/models/pytorch/simclr/utils/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "modelvshuman/models/pytorch/simclr/utils/gdrive.py",
    "chars": 4071,
    "preview": "import requests\nimport torch\nimport os\nimport sys\nimport hashlib\nimport re\nimport errno\nfrom tqdm import tqdm\nimport war"
  },
  {
    "path": "modelvshuman/models/pytorch/simclr/utils/mlayer.py",
    "chars": 3486,
    "preview": "# Utils for getting model layers\n\nfrom collections import OrderedDict\nfrom torch import nn\n\n\ndef clip_model(model, layer"
  },
  {
    "path": "modelvshuman/models/pytorch/simclr/utils/modules.py",
    "chars": 1890,
    "preview": "## Custom pytorch modules\nimport torch\nfrom torch import nn\n\nclass Unnormalize(nn.Module):\n    \"\"\"\n    Helper class for "
  },
  {
    "path": "modelvshuman/models/pytorch/simclr/zoo/__init__.py",
    "chars": 22,
    "preview": "from .simclr import *\n"
  },
  {
    "path": "modelvshuman/models/pytorch/simclr/zoo/simclr.py",
    "chars": 15936,
    "preview": "##################\n# SimCLR networks from Chen et al 2020 in pytorch \n# \n# Chen, T., Kornblith, S., Norouzi, M., & Hinto"
  },
  {
    "path": "modelvshuman/models/registry.py",
    "chars": 533,
    "preview": "from collections import defaultdict\n\n__all__ = ['list_models']\n\n_model_registry = defaultdict(list)  # mapping of model "
  },
  {
    "path": "modelvshuman/models/tensorflow/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "modelvshuman/models/tensorflow/build_model.py",
    "chars": 280,
    "preview": "from .tf_hub_model_url import tfhub_urls\nimport tensorflow_hub as hub\nimport tensorflow as tf\n\n\ndef build_model_from_hub"
  },
  {
    "path": "modelvshuman/models/tensorflow/model_zoo.py",
    "chars": 802,
    "preview": "from ..registry import register_model\n\nfrom ..wrappers.tensorflow import TensorflowModel\nfrom .build_model import build_"
  },
  {
    "path": "modelvshuman/models/tensorflow/tf_hub_model_url.py",
    "chars": 358,
    "preview": "tfhub_urls = {\n    \"efficientnet_b0\": \"https://tfhub.dev/google/efficientnet/b0/classification/1\",\n    \"resnet50\": \"http"
  },
  {
    "path": "modelvshuman/models/wrappers/__init__.py",
    "chars": 74,
    "preview": "from .pytorch import PytorchModel\nfrom .tensorflow import TensorflowModel\n"
  },
  {
    "path": "modelvshuman/models/wrappers/base.py",
    "chars": 200,
    "preview": "from abc import ABC, abstractmethod\n\nclass AbstractModel(ABC):\n\n    @abstractmethod\n    def softmax(self, logits):\n     "
  },
  {
    "path": "modelvshuman/models/wrappers/pytorch.py",
    "chars": 7067,
    "preview": "import math\nimport PIL\nimport clip\nimport numpy as np\nimport torch\nfrom PIL.Image import Image\nfrom torchvision.transfor"
  },
  {
    "path": "modelvshuman/models/wrappers/tensorflow.py",
    "chars": 949,
    "preview": "import numpy as np\nimport tensorflow as tf\nfrom skimage.transform import resize\n\ntf.compat.v1.enable_eager_execution()\nf"
  },
  {
    "path": "modelvshuman/plotting/__init__.py",
    "chars": 42,
    "preview": "from . import analyses\nfrom . import plot\n"
  },
  {
    "path": "modelvshuman/plotting/analyses.py",
    "chars": 12432,
    "preview": "\"\"\"\nAnalyses based on .csv files containing experimental data.\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport os\nfro"
  },
  {
    "path": "modelvshuman/plotting/colors.py",
    "chars": 649,
    "preview": "\"\"\"\nDefine color scheme.\n\"\"\"\n\n\ndef rgb(r, g, b, divide_by=255.0):\n    \"\"\"Convenience function: return colour in [0, 1].\""
  },
  {
    "path": "modelvshuman/plotting/decision_makers.py",
    "chars": 3715,
    "preview": "# /!usr/bin/env python3\n\n\"\"\"\nDefine decision makers (either human participants or CNN models).\n\"\"\"\n\nfrom dataclasses imp"
  },
  {
    "path": "modelvshuman/plotting/plot.py",
    "chars": 48579,
    "preview": "# /!usr/bin/env python3\n\n\"\"\"\nPlotting functionality\n\"\"\"\n\nimport copy\nimport logging\nimport os\nfrom os.path import join a"
  },
  {
    "path": "modelvshuman/utils.py",
    "chars": 3530,
    "preview": "import logging\nimport os\nimport sys\nimport shutil\nimport requests\nfrom os.path import join\n\nimport torchvision.models as"
  },
  {
    "path": "modelvshuman/version.py",
    "chars": 41,
    "preview": "__version__ = \"0.1\"\nVERSION = __version__"
  },
  {
    "path": "raw-data/colour/colour_subject-01_session_1.csv",
    "chars": 119655,
    "preview": "subj,Session,trial,rt,object_response,category,condition,imagename\nsubject-01,1,1,0.796405076980591,oven,oven,cr,0001_cl"
  },
  {
    "path": "raw-data/colour/colour_subject-02_session_1.csv",
    "chars": 119718,
    "preview": "subj,Session,trial,rt,object_response,category,condition,imagename\nsubject-02,1,1,1.12876987457275,bottle,bottle,cr,0001"
  },
  {
    "path": "raw-data/colour/colour_subject-03_session_1.csv",
    "chars": 119501,
    "preview": "subj,Session,trial,rt,object_response,category,condition,imagename\nsubject-03,1,1,0.586735963821411,boat,boat,cr,0001_cl"
  },
  {
    "path": "raw-data/colour/colour_subject-04_session_1.csv",
    "chars": 119474,
    "preview": "subj,Session,trial,rt,object_response,category,condition,imagename\nsubject-04,1,1,1.18576312065125,knife,knife,bw,0001_c"
  },
  {
    "path": "raw-data/contrast/contrast_subject-01_session_1.csv",
    "chars": 123138,
    "preview": "subj,Session,trial,rt,object_response,category,condition,imagename\nsubject-01,1,1,1.05363798141479,knife,knife,c30,0001_"
  },
  {
    "path": "raw-data/contrast/contrast_subject-02_session_1.csv",
    "chars": 123624,
    "preview": "subj,Session,trial,rt,object_response,category,condition,imagename\nsubject-02,1,1,0.670699119567871,boat,airplane,c30,00"
  },
  {
    "path": "raw-data/contrast/contrast_subject-03_session_1.csv",
    "chars": 123467,
    "preview": "subj,Session,trial,rt,object_response,category,condition,imagename\nsubject-03,1,1,1.24921989440918,bottle,elephant,c03,0"
  },
  {
    "path": "raw-data/contrast/contrast_subject-04_session_1.csv",
    "chars": 123831,
    "preview": "subj,Session,trial,rt,object_response,category,condition,imagename\nsubject-04,1,1,0.906422853469849,cat,cat,c01,0001_cop"
  },
  {
    "path": "raw-data/cue-conflict/cue-conflict_subject-01_session_1.csv",
    "chars": 116385,
    "preview": "subj,session,trial,rt,object_response,category,condition,imagename\nsubject-01,1,1,1.13377118110657,bird,bird,0,0001_s5n_"
  },
  {
    "path": "raw-data/cue-conflict/cue-conflict_subject-02_session_1.csv",
    "chars": 116988,
    "preview": "subj,session,trial,rt,object_response,category,condition,imagename\nsubject-02,1,1,1.03254890441895,oven,clock,0,0001_s5n"
  },
  {
    "path": "raw-data/cue-conflict/cue-conflict_subject-03_session_1.csv",
    "chars": 117186,
    "preview": "subj,session,trial,rt,object_response,category,condition,imagename\nsubject-03,1,1,0.68296217918396,clock,clock,0,0001_s5"
  },
  {
    "path": "raw-data/cue-conflict/cue-conflict_subject-04_session_1.csv",
    "chars": 115974,
    "preview": "subj,session,trial,rt,object_response,category,condition,imagename\nsubject-04,1,1,0.936746120452881,truck,truck,0,0001_s"
  },
  {
    "path": "raw-data/cue-conflict/cue-conflict_subject-05_session_1.csv",
    "chars": 117282,
    "preview": "subj,session,trial,rt,object_response,category,condition,imagename\nsubject-05,1,1,1.17140913009644,bear,bear,0,0001_s5n_"
  },
  {
    "path": "raw-data/cue-conflict/cue-conflict_subject-06_session_1.csv",
    "chars": 117128,
    "preview": "subj,session,trial,rt,object_response,category,condition,imagename\nsubject-06,1,1,1.32128190994263,bird,cat,0,0001_s5n_s"
  },
  {
    "path": "raw-data/cue-conflict/cue-conflict_subject-07_session_1.csv",
    "chars": 116855,
    "preview": "subj,session,trial,rt,object_response,category,condition,imagename\nsubject-07,1,1,1.2597861289978,oven,car,0,0001_s5n_s0"
  },
  {
    "path": "raw-data/cue-conflict/cue-conflict_subject-08_session_1.csv",
    "chars": 116968,
    "preview": "subj,session,trial,rt,object_response,category,condition,imagename\nsubject-08,1,1,1.29922008514404,bottle,truck,0,0001_s"
  },
  {
    "path": "raw-data/cue-conflict/cue-conflict_subject-09_session_1.csv",
    "chars": 117133,
    "preview": "subj,session,trial,rt,object_response,category,condition,imagename\nsubject-09,1,1,0.53160285949707,clock,clock,0,0001_s5"
  },
  {
    "path": "raw-data/cue-conflict/cue-conflict_subject-10_session_1.csv",
    "chars": 116728,
    "preview": "subj,session,trial,rt,object_response,category,condition,imagename\nsubject-10,1,1,0.84811806678772,oven,airplane,0,0001_"
  },
  {
    "path": "raw-data/edge/edge_subject-01_session_1.csv",
    "chars": 13454,
    "preview": "subj,session,trial,rt,object_response,category,condition,imagename\nsubject-01,1,1,0.868942022323608,knife,oven,0,0001_ed"
  },
  {
    "path": "raw-data/edge/edge_subject-02_session_1.csv",
    "chars": 13467,
    "preview": "subj,session,trial,rt,object_response,category,condition,imagename\nsubject-02,1,1,0.86748194694519,cat,cat,0,0001_edg_s0"
  },
  {
    "path": "raw-data/edge/edge_subject-03_session_1.csv",
    "chars": 13426,
    "preview": "subj,session,trial,rt,object_response,category,condition,imagename\nsubject-03,1,1,0.825762987136841,bicycle,bicycle,0,00"
  },
  {
    "path": "raw-data/edge/edge_subject-04_session_1.csv",
    "chars": 13399,
    "preview": "subj,session,trial,rt,object_response,category,condition,imagename\nsubject-04,1,1,1.38495302200317,oven,bird,0,0001_edg_"
  },
  {
    "path": "raw-data/edge/edge_subject-05_session_1.csv",
    "chars": 13448,
    "preview": "subj,session,trial,rt,object_response,category,condition,imagename\nsubject-05,1,1,0.982416868209839,truck,truck,0,0001_e"
  },
  {
    "path": "raw-data/edge/edge_subject-06_session_1.csv",
    "chars": 13453,
    "preview": "subj,session,trial,rt,object_response,category,condition,imagename\nsubject-06,1,1,0.851415872573853,truck,truck,0,0001_e"
  },
  {
    "path": "raw-data/edge/edge_subject-07_session_1.csv",
    "chars": 13388,
    "preview": "subj,session,trial,rt,object_response,category,condition,imagename\nsubject-07,1,1,0.915823936462402,car,car,0,0001_edg_s"
  },
  {
    "path": "raw-data/edge/edge_subject-08_session_1.csv",
    "chars": 13464,
    "preview": "subj,session,trial,rt,object_response,category,condition,imagename\nsubject-08,1,1,0.685027837753296,elephant,elephant,0,"
  },
  {
    "path": "raw-data/edge/edge_subject-09_session_1.csv",
    "chars": 12860,
    "preview": "subj,session,trial,rt,object_response,category,condition,imagename\nsubject-09,1,1,0.940936088562012,airplane,airplane,0,"
  },
  {
    "path": "raw-data/edge/edge_subject-10_session_1.csv",
    "chars": 13452,
    "preview": "subj,session,trial,rt,object_response,category,condition,imagename\nsubject-10,1,1,0.74855899810791,keyboard,keyboard,0,0"
  },
  {
    "path": "raw-data/eidolonI/eidolonI_subject-01_session_1.csv",
    "chars": 135003,
    "preview": "subj,Session,trial,rt,object_response,category,condition,imagename\nsubject-01,1,1,0.828052043914795,oven,oven,1-10-10,00"
  },
  {
    "path": "raw-data/eidolonI/eidolonI_subject-02_session_1.csv",
    "chars": 134724,
    "preview": "subj,Session,trial,rt,object_response,category,condition,imagename\nsubject-02,1,1,1.01900005340576,clock,clock,4-10-10,0"
  },
  {
    "path": "raw-data/eidolonI/eidolonI_subject-03_session_1.csv",
    "chars": 134513,
    "preview": "subj,Session,trial,rt,object_response,category,condition,imagename\nsubject-03,1,1,0.741258144378662,cat,dog,16-10-10,000"
  },
  {
    "path": "raw-data/eidolonI/eidolonI_subject-04_session_1.csv",
    "chars": 133985,
    "preview": "subj,Session,trial,rt,object_response,category,condition,imagename\nsubject-04,1,1,1.50014805793762,bird,bottle,64-10-10,"
  },
  {
    "path": "raw-data/eidolonII/eidolonII_subject-01_session_1.csv",
    "chars": 132413,
    "preview": "subj,Session,trial,rt,object_response,category,condition,imagename\nsubject-01,1,1,0.969465970993042,oven,car,64-3-10,000"
  },
  {
    "path": "raw-data/eidolonII/eidolonII_subject-02_session_1.csv",
    "chars": 131812,
    "preview": "subj,Session,trial,rt,object_response,category,condition,imagename\nsubject-02,1,1,0.838385105133057,cat,bottle,16-3-10,0"
  },
  {
    "path": "raw-data/eidolonII/eidolonII_subject-03_session_1.csv",
    "chars": 131938,
    "preview": "subj,Session,trial,rt,object_response,category,condition,imagename\nsubject-03,1,1,1.12425494194031,dog,knife,16-3-10,000"
  },
  {
    "path": "raw-data/eidolonII/eidolonII_subject-04_session_1.csv",
    "chars": 131912,
    "preview": "subj,Session,trial,rt,object_response,category,condition,imagename\nsubject-04,1,1,1.08663988113403,bear,dog,8-3-10,0001_"
  },
  {
    "path": "raw-data/eidolonIII/eidolonIII_subject-01_session_1.csv",
    "chars": 132504,
    "preview": "subj,Session,trial,rt,object_response,category,condition,imagename\nsubject-01,1,1,1.176265001297,bear,oven,16-0-10,0001_"
  },
  {
    "path": "raw-data/eidolonIII/eidolonIII_subject-02_session_1.csv",
    "chars": 131838,
    "preview": "subj,Session,trial,rt,object_response,category,condition,imagename\nsubject-02,1,1,1.07026100158691,car,car,8-0-10,0001_e"
  },
  {
    "path": "raw-data/eidolonIII/eidolonIII_subject-03_session_1.csv",
    "chars": 131375,
    "preview": "subj,Session,trial,rt,object_response,category,condition,imagename\nsubject-03,1,1,0.708024978637695,car,car,2-0-10,0001_"
  },
  {
    "path": "raw-data/eidolonIII/eidolonIII_subject-04_session_1.csv",
    "chars": 132765,
    "preview": "subj,Session,trial,rt,object_response,category,condition,imagename\nsubject-04,1,1,0.963965892791748,bear,airplane,2-0-10"
  },
  {
    "path": "raw-data/false-colour/false-colour_subject-01_session_1.csv",
    "chars": 109958,
    "preview": "subj,Session,trial,rt,object_response,category,condition,imagename\nsubject-01,1,1,0.809584140777588,knife,knife,true,000"
  },
  {
    "path": "raw-data/false-colour/false-colour_subject-02_session_1.csv",
    "chars": 110149,
    "preview": "subj,Session,trial,rt,object_response,category,condition,imagename\nsubject-02,1,1,1.38158011436462,car,elephant,true,000"
  },
  {
    "path": "raw-data/false-colour/false-colour_subject-03_session_1.csv",
    "chars": 110060,
    "preview": "subj,Session,trial,rt,object_response,category,condition,imagename\nsubject-03,1,1,0.497289896011353,boat,boat,true,0001_"
  },
  {
    "path": "raw-data/false-colour/false-colour_subject-04_session_1.csv",
    "chars": 110175,
    "preview": "subj,Session,trial,rt,object_response,category,condition,imagename\nsubject-04,1,1,0.79400110244751,truck,truck,false,000"
  },
  {
    "path": "raw-data/high-pass/high-pass_subject-01_session_1.csv",
    "chars": 121047,
    "preview": "subj,Session,trial,rt,object_response,category,condition,imagename\nsubject-01,1,1,1.33161187171936,clock,boat,0.7,0001_h"
  },
  {
    "path": "raw-data/high-pass/high-pass_subject-02_session_1.csv",
    "chars": 120992,
    "preview": "subj,Session,trial,rt,object_response,category,condition,imagename\nsubject-02,1,1,0.944271087646484,keyboard,clock,1.5,0"
  },
  {
    "path": "raw-data/high-pass/high-pass_subject-03_session_1.csv",
    "chars": 120749,
    "preview": "subj,Session,trial,rt,object_response,category,condition,imagename\nsubject-03,1,1,0.939826011657715,boat,elephant,1,0001"
  },
  {
    "path": "raw-data/high-pass/high-pass_subject-04_session_1.csv",
    "chars": 120518,
    "preview": "subj,Session,trial,rt,object_response,category,condition,imagename\nsubject-04,1,1,1.03202104568481,boat,chair,0.45,0001_"
  },
  {
    "path": "raw-data/low-pass/low-pass_subject-01_session_1.csv",
    "chars": 117433,
    "preview": "subj,Session,trial,rt,object_response,category,condition,imagename\nsubject-01,1,1,0.945574045181274,cat,chair,40,0001_lp"
  },
  {
    "path": "raw-data/low-pass/low-pass_subject-02_session_1.csv",
    "chars": 117653,
    "preview": "subj,Session,trial,rt,object_response,category,condition,imagename\nsubject-02,1,1,1.01214194297791,elephant,elephant,10,"
  },
  {
    "path": "raw-data/low-pass/low-pass_subject-03_session_1.csv",
    "chars": 117877,
    "preview": "subj,Session,trial,rt,object_response,category,condition,imagename\nsubject-03,1,1,0.859899997711182,car,clock,3,0001_lp_"
  },
  {
    "path": "raw-data/low-pass/low-pass_subject-04_session_1.csv",
    "chars": 117346,
    "preview": "subj,Session,trial,rt,object_response,category,condition,imagename\nsubject-04,1,1,1.14607691764832,bear,cat,15,0001_lp_s"
  },
  {
    "path": "raw-data/phase-scrambling/phase-scrambling_subject-01_session_1.csv",
    "chars": 104557,
    "preview": "subj,Session,trial,rt,object_response,category,condition,imagename\nsubject-01,1,1,0.773930072784424,truck,truck,30,0001_"
  },
  {
    "path": "raw-data/phase-scrambling/phase-scrambling_subject-02_session_1.csv",
    "chars": 104693,
    "preview": "subj,Session,trial,rt,object_response,category,condition,imagename\nsubject-02,1,1,0.892426013946533,oven,keyboard,150,00"
  },
  {
    "path": "raw-data/phase-scrambling/phase-scrambling_subject-03_session_1.csv",
    "chars": 105159,
    "preview": "subj,Session,trial,rt,object_response,category,condition,imagename\nsubject-03,1,1,1.02795696258545,boat,bottle,60,0001_p"
  },
  {
    "path": "raw-data/phase-scrambling/phase-scrambling_subject-04_session_1.csv",
    "chars": 104626,
    "preview": "subj,Session,trial,rt,object_response,category,condition,imagename\nsubject-04,1,1,0.821409940719604,bear,car,180,0001_ps"
  },
  {
    "path": "raw-data/power-equalisation/power-equalisation_subject-01_session_1.csv",
    "chars": 105484,
    "preview": "subj,Session,trial,rt,object_response,category,condition,imagename\nsubject-01,1,1,0.799072027206421,clock,clock,0,0001_p"
  },
  {
    "path": "raw-data/power-equalisation/power-equalisation_subject-02_session_1.csv",
    "chars": 105740,
    "preview": "subj,Session,trial,rt,object_response,category,condition,imagename\nsubject-02,1,1,0.955517053604126,dog,dog,pow,0001_pow"
  },
  {
    "path": "raw-data/power-equalisation/power-equalisation_subject-03_session_1.csv",
    "chars": 105623,
    "preview": "subj,Session,trial,rt,object_response,category,condition,imagename\nsubject-03,1,1,1.47366499900818,boat,airplane,pow,000"
  },
  {
    "path": "raw-data/power-equalisation/power-equalisation_subject-04_session_1.csv",
    "chars": 105473,
    "preview": "subj,Session,trial,rt,object_response,category,condition,imagename\nsubject-04,1,1,0.855642080307007,oven,chair,pow,0001_"
  },
  {
    "path": "raw-data/rotation/rotation_subject-01_session_1.csv",
    "chars": 121352,
    "preview": "subj,Session,trial,rt,object_response,category,condition,imagename\nsubject-01,1,1,1.15696287155151,keyboard,chair,0,0001"
  },
  {
    "path": "raw-data/rotation/rotation_subject-02_session_1.csv",
    "chars": 121156,
    "preview": "subj,Session,trial,rt,object_response,category,condition,imagename\nsubject-02,1,1,0.842274904251099,knife,knife,180,0001"
  },
  {
    "path": "raw-data/rotation/rotation_subject-03_session_1.csv",
    "chars": 121278,
    "preview": "subj,Session,trial,rt,object_response,category,condition,imagename\nsubject-03,1,1,0.857604026794434,airplane,airplane,18"
  },
  {
    "path": "raw-data/rotation/rotation_subject-04_session_1.csv",
    "chars": 121556,
    "preview": "subj,Session,trial,rt,object_response,category,condition,imagename\nsubject-04,1,1,0.66027307510376,bicycle,bicycle,180,0"
  },
  {
    "path": "raw-data/silhouette/silhouette_subject-01_session_1.csv",
    "chars": 13526,
    "preview": "subj,session,trial,rt,object_response,category,condition,imagename\nsubject-01,1,1,0.857651233673096,boat,boat,0,0001_sif"
  },
  {
    "path": "raw-data/silhouette/silhouette_subject-02_session_1.csv",
    "chars": 13319,
    "preview": "subj,session,trial,rt,object_response,category,condition,imagename\nsubject-02,1,1,1.4882390499115,bear,bear,0,0001_sif_s"
  },
  {
    "path": "raw-data/silhouette/silhouette_subject-03_session_1.csv",
    "chars": 13426,
    "preview": "subj,session,trial,rt,object_response,category,condition,imagename\nsubject-03,1,1,1.15690112113953,airplane,keyboard,0,0"
  },
  {
    "path": "raw-data/silhouette/silhouette_subject-04_session_1.csv",
    "chars": 13447,
    "preview": "subj,session,trial,rt,object_response,category,condition,imagename\nsubject-04,1,1,1.06918811798096,airplane,airplane,0,0"
  },
  {
    "path": "raw-data/silhouette/silhouette_subject-05_session_1.csv",
    "chars": 13336,
    "preview": "subj,session,trial,rt,object_response,category,condition,imagename\nsubject-05,1,1,1.17234897613525,boat,boat,0,0001_sif_"
  },
  {
    "path": "raw-data/silhouette/silhouette_subject-06_session_1.csv",
    "chars": 13366,
    "preview": "subj,session,trial,rt,object_response,category,condition,imagename\nsubject-06,1,1,NaN,na,bicycle,0,0001_sif_s06_0_bicycl"
  },
  {
    "path": "raw-data/silhouette/silhouette_subject-07_session_1.csv",
    "chars": 13417,
    "preview": "subj,session,trial,rt,object_response,category,condition,imagename\nsubject-07,1,1,1.03786706924438,boat,boat,0,0001_sif_"
  },
  {
    "path": "raw-data/silhouette/silhouette_subject-08_session_1.csv",
    "chars": 13373,
    "preview": "subj,session,trial,rt,object_response,category,condition,imagename\nsubject-08,1,1,0.629379987716675,bird,bird,0,0001_sif"
  },
  {
    "path": "raw-data/silhouette/silhouette_subject-09_session_1.csv",
    "chars": 13391,
    "preview": "subj,session,trial,rt,object_response,category,condition,imagename\nsubject-09,1,1,0.851751089096069,bear,bear,0,0001_sif"
  },
  {
    "path": "raw-data/silhouette/silhouette_subject-10_session_1.csv",
    "chars": 13409,
    "preview": "subj,session,trial,rt,object_response,category,condition,imagename\nsubject-10,1,1,0.765292882919312,chair,chair,0,0001_s"
  },
  {
    "path": "raw-data/sketch/sketch_subject-01_session_1.csv",
    "chars": 78402,
    "preview": "subj,session,trial,rt,object_response,category,condition,imagename\nsubject-01,1,1,0.586625099182129,boat,boat,0,0001_ske"
  },
  {
    "path": "raw-data/sketch/sketch_subject-02_session_1.csv",
    "chars": 78442,
    "preview": "subj,session,trial,rt,object_response,category,condition,imagename\nsubject-02,1,1,0.636170864105225,bear,bear,0,0001_ske"
  },
  {
    "path": "raw-data/sketch/sketch_subject-03_session_1.csv",
    "chars": 78130,
    "preview": "subj,session,trial,rt,object_response,category,condition,imagename\nsubject-03,1,1,0.887437105178833,bird,bird,0,0001_ske"
  },
  {
    "path": "raw-data/sketch/sketch_subject-04_session_1.csv",
    "chars": 78404,
    "preview": "subj,session,trial,rt,object_response,category,condition,imagename\nsubject-04,1,1,0.731143951416016,bicycle,bicycle,0,00"
  },
  {
    "path": "raw-data/sketch/sketch_subject-05_session_1.csv",
    "chars": 78056,
    "preview": "subj,session,trial,rt,object_response,category,condition,imagename\nsubject-05,1,1,0.783522844314575,bird,bird,0,0001_ske"
  },
  {
    "path": "raw-data/sketch/sketch_subject-06_session_1.csv",
    "chars": 78289,
    "preview": "subj,session,trial,rt,object_response,category,condition,imagename\nsubject-06,1,1,0.886153936386108,knife,knife,0,0001_s"
  },
  {
    "path": "raw-data/sketch/sketch_subject-07_session_1.csv",
    "chars": 78181,
    "preview": "subj,session,trial,rt,object_response,category,condition,imagename\nsubject-07,1,1,0.788907051086426,cat,cat,0,0001_ske_s"
  },
  {
    "path": "raw-data/stylized/stylized_subject-01_session_1.csv",
    "chars": 89823,
    "preview": "subj,session,trial,rt,object_response,category,condition,imagename\nsubject-01,1,1,0.701272964477539,bicycle,bicycle,0,00"
  },
  {
    "path": "raw-data/stylized/stylized_subject-02_session_1.csv",
    "chars": 89312,
    "preview": "subj,session,trial,rt,object_response,category,condition,imagename\nsubject-02,1,1,0.6699538230896,boat,bicycle,0,0001_st"
  },
  {
    "path": "raw-data/stylized/stylized_subject-03_session_1.csv",
    "chars": 89161,
    "preview": "subj,session,trial,rt,object_response,category,condition,imagename\nsubject-03,1,1,1.29065608978271,elephant,elephant,0,0"
  },
  {
    "path": "raw-data/stylized/stylized_subject-04_session_1.csv",
    "chars": 89271,
    "preview": "subj,session,trial,rt,object_response,category,condition,imagename\nsubject-04,1,1,1.04218292236328,oven,airplane,0,0001_"
  },
  {
    "path": "raw-data/stylized/stylized_subject-05_session_1.csv",
    "chars": 89552,
    "preview": "subj,session,trial,rt,object_response,category,condition,imagename\nsubject-05,1,1,1.04852104187012,bear,bear,0,0001_sty_"
  },
  {
    "path": "raw-data/uniform-noise/uniform-noise_subject-01_session_1.csv",
    "chars": 125257,
    "preview": "subj,Session,trial,rt,object_response,category,condition,imagename\nsubject-01,1,1,0.813569068908691,oven,elephant,0.03,0"
  },
  {
    "path": "raw-data/uniform-noise/uniform-noise_subject-02_session_1.csv",
    "chars": 126009,
    "preview": "subj,Session,trial,rt,object_response,category,condition,imagename\nsubject-02,1,1,1.00961899757385,clock,clock,0.00,0001"
  },
  {
    "path": "raw-data/uniform-noise/uniform-noise_subject-03_session_1.csv",
    "chars": 125739,
    "preview": "subj,Session,trial,rt,object_response,category,condition,imagename\nsubject-03,1,1,1.4200279712677,bear,elephant,0.20,000"
  },
  {
    "path": "raw-data/uniform-noise/uniform-noise_subject-04_session_1.csv",
    "chars": 125571,
    "preview": "subj,Session,trial,rt,object_response,category,condition,imagename\nsubject-04,1,1,0.867391109466553,truck,truck,0.20,000"
  },
  {
    "path": "setup.cfg",
    "chars": 2876,
    "preview": "[metadata]\nname = modelvshuman\nversion = 0.1\nauthor =\nauthor-email =\nhome-page = https://github.com/bethgelab/model-vs-h"
  },
  {
    "path": "setup.py",
    "chars": 40,
    "preview": "from distutils.core import setup\nsetup()"
  }
]

About this extraction

This page contains the full source code of the bethgelab/model-vs-human GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 178 files (8.2 MB), approximately 2.2M tokens, and a symbol index with 410 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.

Extracted by GitExtract — a free GitHub-repository-to-text converter for AI. Built by Nikandr Surkov.

Copied to clipboard!