[
  {
    "path": ".github/FUNDING.yml",
    "content": "# These are supported funding model platforms\n\ncustom: ['https://www.continualai.org/supporters']\n"
  },
  {
    "path": ".gitignore",
    "content": "# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n.idea/\n# C extensions\n*.so\n\n# Distribution / packaging\n.Python\nbuild/\ndevelop-eggs/\ndist/\ndownloads/\neggs/\n.eggs/\nlib/\nlib64/\nparts/\nsdist/\nvar/\nwheels/\npip-wheel-metadata/\nshare/python-wheels/\n*.egg-info/\n.installed.cfg\n*.egg\nMANIFEST\n\n# PyInstaller\n#  Usually these files are written by a python script from a template\n#  before PyInstaller builds the exe, so as to inject date/other infos into it.\n*.manifest\n*.spec\n\n# Installer logs\npip-log.txt\npip-delete-this-directory.txt\n\n# Unit test / coverage reports\nhtmlcov/\n.tox/\n.nox/\n.coverage\n.coverage.*\n.cache\nnosetests.xml\ncoverage.xml\n*.cover\n*.py,cover\n.hypothesis/\n.pytest_cache/\n\n# Translations\n*.mo\n*.pot\n\n# Django stuff:\n*.log\nlocal_settings.py\ndb.sqlite3\ndb.sqlite3-journal\n\n# Flask stuff:\ninstance/\n.webassets-cache\n\n# Scrapy stuff:\n.scrapy\n\n# Sphinx documentation\ndocs/_build/\n\n# PyBuilder\ntarget/\n\n# Jupyter Notebook\n.ipynb_checkpoints\n\n# IPython\nprofile_default/\nipython_config.py\n\n# pyenv\n.python-version\n\n# pipenv\n#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.\n#   However, in case of collaboration, if having platform-specific dependencies or dependencies\n#   having no cross-platform support, pipenv may install dependencies that don't work, or not\n#   install all needed dependencies.\n#Pipfile.lock\n\n# PEP 582; used by e.g. github.com/David-OConnor/pyflow\n__pypackages__/\n\n# Celery stuff\ncelerybeat-schedule\ncelerybeat.pid\n\n# SageMath parsed files\n*.sage.py\n\n# Environments\nenv/\nvenv/\nENV/\nenv.bak/\nvenv.bak/\n\n# Spyder project settings\n.spyderproject\n.spyproject\n\n# Rope project settings\n.ropeproject\n\n# mkdocs documentation\n/site\n\n# mypy\n.mypy_cache/\n.dmypy.json\ndmypy.json\n\n# Pyre type checker\n.pyre/"
  },
  {
    "path": "CITATION.cff",
    "content": "# Copyright (c) ContinualAI\ncff-version: 1.2.0\ntitle: \"Avalanche: an End-to-End Library for Continual Learning\"\nauthors:\n  - name: \"The Avalanche team\"\nmessage: \"If you used Avalanche in your research project, please remember to cite our paper.\"\npreferred-citation:\n  authors:\n    - family-names: Lomonaco\n      given-names: Vincenzo\n    - family-names: Pellegrini\n      given-names: Lorenzo\n    - family-names: Cossu\n      given-names: Andrea\n    - family-names: Carta\n      given-names: Antonio\n    - family-names: Graffieti\n      given-names: Gabriele\n    - family-names: Hayes\n      given-names: Tyler L.\n    - family-names: De Lange\n      given-names: Matthias\n    - family-names: Masana\n      given-names: Marc\n    - family-names: Pomponi\n      given-names: Jary \n    - family-names: Van de Ven\n      given-names: Gido\n    - family-names: Mundt\n      given-names: Martin\n    - family-names: She\n      given-names: Qi\n    - family-names: Cooper\n      given-names: Keiland\n    - family-names: Forest\n      given-names: Jeremy\n    - family-names: Belouadah\n      given-names: Eden\n    - family-names: Calderara\n      given-names: Simone\n    - family-names: Parisi\n      given-names: German I.\n    - family-names: Cuzzolin\n      given-names: Fabio\n    - family-names: Tolias\n      given-names: Andreas\n    - family-names: Scardapane\n      given-names: Simone\n    - family-names: Antiga\n      given-names: Luca\n    - family-names: Ahmad\n      given-names: Subutai\n    - family-names: Popescu\n      given-names: Adrian\n    - family-names: Kanan\n      given-names: Christopher\n    - family-names: Van de Weijer\n      given-names: Joost\n    - family-names: Tuytelaars\n      given-names: Tinne\n    - family-names: Bacciu\n      given-names: Davide\n    - family-names: Maltoni\n      given-names: Davide\n  title: \"Avalanche: an End-to-End Library for Continual Learning\"\n  type: proceedings\n  year: 2021\n  conference:\n    name: \"2nd Continual Learning in Computer Vision Workshop\"\n  publisher:\n    name: \"Proceedings of IEEE Conference on Computer Vision and Pattern Recognition\"\n"
  },
  {
    "path": "LICENSE",
    "content": "MIT License\n\nCopyright (c) 2021 ContinualAI\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "README.md",
    "content": "<div align=\"center\">\n    \n# Continual Learning Baselines\n**[Avalanche Website](https://avalanche.continualai.org)** | **[Avalanche Repository](https://github.com/ContinualAI/avalanche)**\n\n</div>\n\n<p align=\"center\">\n    <img src=\"https://www.dropbox.com/s/90thp7at72sh9tj/avalanche_logo_with_clai.png?raw=1\"/>\n</p>\n\n\n\n**This project provides a set of examples with popular continual learning strategies and baselines. \nYou can easily run experiments to reproduce results from original paper or tweak the hyperparameters to get your own results.  Sky is the limit!**\n\nTo guarantee fair implementations, we rely on the **[Avalanche](https://github.com/ContinualAI/avalanche)** library, developed and maintained by *[ContinualAI](https://www.continualai.org/)*.\nFeel free to check it out and support the project!\n\n## Experiments\nThe tables below describes all the experiments currently implemented in the `experiments` folder, along with their result.\nThe tables are not meant to compare different methods but rather as a reference for their performance. Different methods may use \nslightly different setups (e.g., starting from a pre-trained model or from scratch), so it does not always make sense to compare them.\n\nIf an experiment reproduces exactly the results of a paper in terms of `Performance` (even if with different hyper-parameters), it is marked with ✅ on the `Reproduced` column. Otherwise, it is marked with ❌.   \n`Avalanche` means that we could not find any specific paper as reference and we used the performance of Avalanche obtained when the strategy was first add to the library.  \nIf the `Performance` is much worse than the expected one, the `bug` tag is used in the `Reproduced` column.    \nFinally, the `Reference` column reports the expected performance, together with a link to the associated paper (if any). 
Note that the link does not always point to the paper which introduced the strategy, since it sometimes differs from the one we used to get the target performance.\n\nACC means the Average Accuracy on all experiences after training on the last experience.   \n\nFirst, we report the results for the **non-online** continual learning case (a.k.a. batch continual learning). Then, we report the results for the **online continual learning** case.\n\n### Batch Continual Learning (non-online)\n\n|     Benchmarks      |              Strategy               |      Scenario      | Performance | Reference                                                                                                                  | Reproduced    |\n|:-------------------:|:-----------------------------------:|:------------------:|:-----------:|:---------------------------------------------------------------------------------------------------------------------------|:--------------|\n|   Permuted MNIST    |    Less-Forgetful Learning (LFL)    | Domain-Incremental |  ACC=0.88   | ACC=0.88                                                                                                                   | ✅ `Avalanche` | \n|   Permuted MNIST    | Elastic Weight Consolidation (EWC)  | Domain-Incremental |  ACC=0.83   | [ACC=0.94](https://www.pnas.org/content/114/13/3521)                                                                       | ❌             |\n|   Permuted MNIST    |     Synaptic Intelligence (SI)      | Domain-Incremental |  ACC=0.83   | [ACC=0.95](http://proceedings.mlr.press/v70/zenke17a.html)                                                                 | ❌             |\n|   Split CIFAR-100   |               LaMAML                |  Task-Incremental  |  ACC=0.70   | [ACC=0.70](https://arxiv.org/abs/2007.13904)                                                                               | ✅             |\n|   Split CIFAR-100   |                iCaRL                | 
Class-Incremental  |  ACC=0.48   | [ACC=0.50](https://openaccess.thecvf.com/content_cvpr_2017/html/Rebuffi_iCaRL_Incremental_Classifier_CVPR_2017_paper.html) | ✅             |\n|   Split CIFAR-100   |               Replay                | Class-Incremental  |  ACC=0.32   | ACC=0.32                                                                                                                   | ✅ `Avalanche` |\n|     Split MNIST     |                RWalk                |  Task-Incremental  |  ACC=0.99   | [ACC=0.99](https://openaccess.thecvf.com/content_ECCV_2018/html/Arslan_Chaudhry__Riemannian_Walk_ECCV_2018_paper.html)     | ✅             |\n|     Split MNIST     |     Synaptic Intelligence (SI)      |  Task-Incremental  |  ACC=0.97   | [ACC=0.97](http://proceedings.mlr.press/v70/zenke17a.html)                                                                 | ✅             |\n|     Split MNIST     |                GDumb                | Class-Incremental  |  ACC=0.97   | [ACC=0.97](https://link.springer.com/chapter/10.1007/978-3-030-58536-5_31)                                                 | ✅             |\n|     Split MNIST     |             GSS_greedy              | Class-Incremental  |  ACC=0.82   | [ACC=0.78](https://arxiv.org/abs/1903.08671)                                                                               | ❌             |\n|     Split MNIST     |       Generative Replay (GR)        | Class-Incremental  |  ACC=0.75   | [ACC=0.75](https://arxiv.org/abs/1705.08690)                                                                               | ✅             |\n|     Split MNIST     |  Learning without Forgetting (LwF)  | Class-Incremental  |  ACC=0.23   | [ACC=0.23](https://arxiv.org/pdf/1904.07734.pdf)                                                                           | ✅             |\n| Split Tiny ImageNet |               LaMAML                |  Task-Incremental  |  ACC=0.54   | [ACC=0.66](https://arxiv.org/abs/2007.13904)         
                                                                      | ❌             |\n| Split Tiny ImageNet |  Learning without Forgetting (LwF)  |  Task-Incremental  |  ACC=0.44   | [ACC=0.44](https://arxiv.org/pdf/1904.07734.pdf)                                                                           | ✅             |\n| Split Tiny ImageNet |     Memory Aware Synapses (MAS)     |  Task-Incremental  |  ACC=0.40   | [ACC=0.40](https://doi.org/10.1109/TPAMI.2021.3057446)                                                                     | ✅             |\n| Split Tiny ImageNet |              PackNet                |  Task-Incremental  |  ACC=0.46   | [ACC=0.47](https://doi.org/10.1109/TPAMI.2021.3057446) (Table 4 `SMALL`)                                                   | ✅             |\n\n### Online Continual Learning\n\n|   Benchmarks    |              Strategy               |      Scenario      | Performance | Reference                                                                                                 | Reproduced    |\n|:---------------:|:-----------------------------------:|:------------------:|:-----------:|:----------------------------------------------------------------------------------------------------------|:--------------|\n|     CORe50      |     Deep Streaming LDA (DSLDA)      | Class-Incremental  |  ACC=0.79   | [ACC=0.79](https://arxiv.org/abs/1909.01520)                                                              | ✅             | \n| Permuted MNIST  |                 GEM                 | Domain-Incremental |  ACC=0.80   | [ACC=0.83](https://proceedings.neurips.cc/paper/2017/hash/f87522788a2be2d171666752f97ddebb-Abstract.html) | ✅             |\n| Split CIFAR-10  |            Online Replay            | Class-Incremental  |  ACC=0.50   | ACC=0.50                                                                                                  | ✅ `Avalanche` |\n| Split CIFAR-10  |               ER-AML                | 
Class-Incremental  |  ACC=0.47   | [ACC=0.47](https://openreview.net/forum?id=N8MaByOzUfb)                                                   | ✅             |\n| Split CIFAR-10  |               ER-ACE                | Class-Incremental  |  ACC=0.45   | [ACC=0.52](https://openreview.net/forum?id=N8MaByOzUfb)                                                   | ✅             |\n| Split CIFAR-10  | Supervised Contrastive Replay (SCR) | Class-Incremental  |  ACC=0.36   | [ACC=0.48](https://ieeexplore.ieee.org/document/9522763)                                                  | ✅ `Avalanche` |\n| Permuted MNIST  |         Average GEM (AGEM)          | Domain-Incremental |  ACC=0.81   | [ACC=0.81](https://openreview.net/pdf?id=Hkf2_sC5FX)                                                      | ✅             | \n| Split CIFAR-100 |                 GEM                 |  Task-Incremental  |  ACC=0.63   | [ACC=0.63](https://proceedings.neurips.cc/paper/2017/hash/f87522788a2be2d171666752f97ddebb-Abstract.html) | ✅             |\n| Split CIFAR-100 |         Average GEM (AGEM)          |  Task-Incremental  |  ACC=0.62   | [ACC=0.62](https://openreview.net/pdf?id=Hkf2_sC5FX)                                                      | ✅             |\n| Split CIFAR-100 |               ER-ACE                | Class-Incremental  |  ACC=0.24   | [ACC=0.25](https://openreview.net/forum?id=N8MaByOzUfb)                                                   | ✅             |\n| Split CIFAR-100 |               ER-AML                | Class-Incremental  |  ACC=0.24   | [ACC=0.24](https://openreview.net/forum?id=N8MaByOzUfb)                                                   | ✅             |\n| Split CIFAR-100 |            Online Replay            | Class-Incremental  |  ACC=0.21   | ACC=0.21                                                                                                  | ✅ `Avalanche` |\n|   Split MNIST   |                CoPE                 | Class-Incremental  |  ACC=0.93   | 
[ACC=0.93](https://arxiv.org/abs/2009.00919)                                                              | ✅             |\n|   Split MNIST   |            Online Replay            | Class-Incremental  |  ACC=0.92   | ACC=0.92                                                                                                  | ✅ `Avalanche` |\n\n## Python dependencies for experiments\nOutside Python standard library, the main packages required to run the experiments are PyTorch, Avalanche and Pandas. \n* **Avalanche**: The latest version of this repo requires the latest Avalanche version (from master branch): `pip install git+https://github.com/ContinualAI/avalanche.git`. The CL baselines repo is tagged with the supported Avalanche version (you can browse the tags to check out all the versions). You can install the corresponding Avalanche versions with `pip install avalanche-lib==[version number]`, where `[version number]` is of the form `0.1.0`.\nFor some strategies (e.g., LaMAML) you may need to install Avalanche with extra packages, like `pip install avalanche-lib[extra]`. \nFor more details on how to install Avalanche, please check out the complete guide [here](https://avalanche.continualai.org/getting-started/how-to-install). \n* **PyTorch**: we recommend to follow [the official guide](https://pytorch.org/get-started/locally/).\n* **Pandas**: `pip install pandas`. [Official guide](https://pandas.pydata.org/docs/getting_started/install.html#installing-pandas).\n\n\n## Run experiments with Python\nPlace yourself into the project root folder.\n\nExperiments can be run with a python script by simply importing the function from the `experiments` folder and executing it.  \nBy default, experiments will run on GPU, when available.\n\nThe input argument to each experiment is an optional dictionary of parameters to be used in the experiments. 
If `None`, default\nparameters (taken from original paper) will be used.\n\n```python\nfrom experiments.split_mnist import synaptic_intelligence_smnist  # select the experiment\n\n # can be None to use default parameters\ncustom_hyperparameters = {'si_lambda': 0.01, 'cuda': -1, 'seed': 3}\n\n# run the experiment\nresult = synaptic_intelligence_smnist(custom_hyperparameters)\n\n# dictionary of avalanche metrics\nprint(result)  \n```\n\n## Command line experiments\nPlace yourself into the project root folder.   \nYou should add the project root folder to your PYTHONPATH. \n\nFor example, on Linux you can set it up globally:\n```bash\nexport PYTHONPATH=${PYTHONPATH}:/path/to/continual-learning-baselines\n```\nor just for the current command:\n```bash\nPYTHONPATH=${PYTHONPATH}:/path/to/continual-learning-baselines command to be executed\n```\n\nYou can run experiments directly through console with the default parameters.  \nOpen the console and run the python file you want by specifying its path.\n\nFor example, to run Synaptic Intelligence on Split MNIST: \n```bash\npython experiments/split_mnist/synaptic_intelligence.py\n```\n\nTo execute experiment with custom parameters, please refer to the previous section.\n\n\n## Run tests\nPlace yourself into the project root folder.\n\nYou can run all tests with\n```bash\npython -m unittest\n```\n\nor you can specify a test by providing the test name in the format `tests.strategy_class_name.test_benchmarkname`.\n\nFor example to run Synaptic Intelligence on Split MNIST you can run:\n```bash\npython -m unittest tests.SynapticIntelligence.test_smnist\n```\n\n## Cite\nIf you used this repo you automatically used Avalanche, please remember to cite our reference paper published at the [CLVision @ CVPR2021](https://sites.google.com/view/clvision2021/overview?authuser=0) workshop: [\"Avalanche: an End-to-End Library for Continual Learning\"](https://arxiv.org/abs/2104.00405). 
\nThis will help us make Avalanche better known in the machine learning community, ultimately making it a better tool for everyone:\n\n```\n@InProceedings{lomonaco2021avalanche,\n    title={Avalanche: an End-to-End Library for Continual Learning},\n    author={Vincenzo Lomonaco and Lorenzo Pellegrini and Andrea Cossu and Antonio Carta and Gabriele Graffieti and Tyler L. Hayes and Matthias De Lange and Marc Masana and Jary Pomponi and Gido van de Ven and Martin Mundt and Qi She and Keiland Cooper and Jeremy Forest and Eden Belouadah and Simone Calderara and German I. Parisi and Fabio Cuzzolin and Andreas Tolias and Simone Scardapane and Luca Antiga and Subutai Ahmad and Adrian Popescu and Christopher Kanan and Joost van de Weijer and Tinne Tuytelaars and Davide Bacciu and Davide Maltoni},\n    booktitle={Proceedings of IEEE Conference on Computer Vision and Pattern Recognition},\n    series={2nd Continual Learning in Computer Vision Workshop},\n    year={2021}\n}\n```\n\n## Contribute to the project\nWe are always looking for new contributors willing to help us in the challenging mission of providing robust experiments\nto the community. Would you like to join us? The steps are easy!\n\n1. Take a look at the opened issues and find yours\n2. Fork this repo and write an experiment (see next section)\n3. Submit a PR and receive support from the maintainers\n4. Merge the PR, your contribution is now included in the project!\n\n\n### Write an experiment\n1. Create the appropriate script into `experiments/benchmark_folder`. If the benchmark is not present, you can add one.\n2. Fill the `experiment.py` file with your code, following the style of the other experiments. The script should return the metrics used by the related test.\n3. Add to `tests/target_results.csv` the expected result for your experiment. You can add a number or a list of numbers.\n4. Write the unit test in `tests/strategy_folder/experiment.py`. Follow the very simple structure of existing tests.\n5. Update table in `README.md`.\n\n\n### Find the avalanche commit which produced a regression\n1. Place yourself into the avalanche folder and make sure you are using the avalanche version from that repository \nin your python environment (it is usually enough to add `/path/to/avalanche` to your `PYTHONPATH`). \n2. Use the `gitbisect_test.sh` (provided in this repository) in combination with `git bisect` to retrieve the avalanche commit introducing the regression.  \n`git bisect start HEAD v0.1.0 -- # HEAD (current version) is bad, v0.1.0 is good`  \n`git bisect run /path/to/gitbisect_test.sh /path/to/continual-learning-baselines optional_test_name`  \n`git bisect reset`\n3. The `gitbisect_test.sh` script requires a mandatory parameter pointing to the `continual-learning-baselines`\ndirectory and an optional parameter specifying the path to a particular unittest (e.g., `tests.EWC.test_pmnist`).\nIf the second parameter is not given, all the unit tests will be run.\n4. The terminal output will tell you which commit introduced the bug\n5. You can change the `HEAD` and `v0.1.0` ref to any avalanche commit.\n\n"
  },
  {
    "path": "__init__.py",
    "content": "from . import experiments\nfrom . import models\nfrom . import tests\n"
  },
  {
    "path": "example_run.py",
    "content": "\"\"\"\nThis script shows how to run an experiment on a specific strategy and benchmark.\nYou can override default parameters by providing a dictionary as input to the method.\nYou can find all the parameters used by the experiment in the source file of the experiment.\n\"\"\"\n\n# select the experiment\nfrom experiments.split_mnist import synaptic_intelligence_smnist\n\n# run the experiment with custom parameters (do not provide arguments to use default parameters)\nsynaptic_intelligence_smnist({'learning_rate': 1e-3, 'si_lambda': 1})\n"
  },
  {
    "path": "experiments/__init__.py",
    "content": "from . import split_mnist\nfrom . import permuted_mnist\nfrom . import split_tiny_imagenet\nfrom . import split_cifar100\nfrom . import split_cifar10\nfrom . import core50\nfrom . import utils\n"
  },
  {
    "path": "experiments/core50/__init__.py",
    "content": "from .deep_slda import deep_slda_core50\n"
  },
  {
    "path": "experiments/core50/deep_slda.py",
    "content": "import warnings\nimport torch\nimport avalanche as avl\nfrom avalanche.evaluation.metrics import loss_metrics, accuracy_metrics, forgetting_metrics\nfrom avalanche.logging import InteractiveLogger\nfrom torchvision import transforms\nfrom experiments.utils import set_seed, create_default_args\n\n\ndef deep_slda_core50(override_args=None):\n    \"\"\"\n    \"Lifelong Machine Learning with Deep Streaming Linear Discriminant Analysis\"\n    by Hayes et. al. (2020).\n    https://arxiv.org/abs/1909.01520\n    \"\"\"\n    args = create_default_args({'cuda': 0, 'feature_size': 512, 'batch_size': 512,\n                                'shrinkage': 1e-4, 'plastic_cov': True, 'seed': None}, override_args)\n    set_seed(args.seed)\n    device = torch.device(f\"cuda:{args.cuda}\"\n                          if torch.cuda.is_available() and\n                          args.cuda >= 0 else \"cpu\")\n\n    _mu = [0.485, 0.456, 0.406]  # imagenet normalization\n    _std = [0.229, 0.224, 0.225]\n    transform = transforms.Compose([\n        transforms.Resize((224, 224)),\n        transforms.ToTensor(),\n        transforms.Normalize(mean=_mu,\n                             std=_std)\n    ])\n\n    benchmark = avl.benchmarks.CORe50(scenario='nc', train_transform=transform, eval_transform=transform)\n\n    eval_plugin = avl.training.plugins.EvaluationPlugin(\n        loss_metrics(epoch=True, experience=True, stream=True),\n        accuracy_metrics(epoch=True, experience=True, stream=True),\n        forgetting_metrics(experience=True, stream=True),\n        loggers=[InteractiveLogger()]\n    )\n\n    criterion = torch.nn.CrossEntropyLoss()\n    model = avl.models.SLDAResNetModel(device=device, arch='resnet18',\n                                       imagenet_pretrained=True)\n\n    cl_strategy = avl.training.StreamingLDA(model, criterion,\n                                            args.feature_size, num_classes=50,\n                                            
eval_mb_size=args.batch_size,\n                                            train_mb_size=args.batch_size,\n                                            train_epochs=1,\n                                            shrinkage_param=args.shrinkage,\n                                            streaming_update_sigma=args.plastic_cov,\n                                            device=device, evaluator=eval_plugin)\n\n    warnings.warn(\n        \"The Deep SLDA example is not perfectly aligned with \"\n        \"the paper implementation since it does not use a base \"\n        \"initialization phase and instead starts streaming from \"\n        \"pre-trained weights. Performance should still match.\")\n\n    res = None\n    for i, exp in enumerate(benchmark.train_stream):\n        cl_strategy.train(exp)\n        res = cl_strategy.eval(benchmark.test_stream)\n\n    return res\n\n\nif __name__ == '__main__':\n    res = deep_slda_core50()\n    print(res)\n"
  },
  {
    "path": "experiments/permuted_mnist/__init__.py",
    "content": "from .synaptic_intelligence import synaptic_intelligence_pmnist\nfrom .gem import gem_pmnist\nfrom .ewc import ewc_pmnist\nfrom .agem import agem_pmnist\nfrom .lfl import lfl_pmnist\nfrom .naive import naive_pmnist\nfrom .mir import mir_pmnist\n"
  },
  {
    "path": "experiments/permuted_mnist/agem.py",
    "content": "import avalanche as avl\nimport torch\nfrom torch.nn import CrossEntropyLoss\nfrom torch.optim import SGD, Adam\nfrom avalanche.evaluation import metrics as metrics\nfrom models import MLP\nfrom experiments.utils import set_seed, create_default_args\n\n\ndef agem_pmnist(override_args=None):\n    \"\"\"\n    \"Efficient Lifelong Learning with A-GEM\" by Chaudhry et. al. (2019).\n    https://openreview.net/pdf?id=Hkf2_sC5FX\n    \"\"\"\n    args = create_default_args({'cuda': 0, 'patterns_per_exp': 250, 'hidden_size': 256,\n                                'hidden_layers': 2, 'epochs': 1, 'dropout': 0,\n                                'sample_size': 256,\n                                'learning_rate': 0.1, 'train_mb_size': 10,\n                                'seed': None}, override_args)\n    set_seed(args.seed)\n    device = torch.device(f\"cuda:{args.cuda}\"\n                          if torch.cuda.is_available() and\n                          args.cuda >= 0 else \"cpu\")\n\n    benchmark = avl.benchmarks.PermutedMNIST(17)\n    model = MLP(hidden_size=args.hidden_size, hidden_layers=args.hidden_layers,\n                drop_rate=args.dropout)\n    criterion = CrossEntropyLoss()\n\n    interactive_logger = avl.logging.InteractiveLogger()\n\n    evaluation_plugin = avl.training.plugins.EvaluationPlugin(\n        metrics.accuracy_metrics(epoch=True, experience=True, stream=True),\n        loggers=[interactive_logger])\n\n    cl_strategy = avl.training.AGEM(\n        model, SGD(model.parameters(), lr=args.learning_rate), criterion,\n        patterns_per_exp=args.patterns_per_exp, sample_size=args.sample_size,\n        train_mb_size=args.train_mb_size, train_epochs=args.epochs, eval_mb_size=128,\n        device=device, evaluator=evaluation_plugin)\n\n    res = None \n    for experience in benchmark.train_stream:\n        cl_strategy.train(experience)\n        res = cl_strategy.eval(benchmark.test_stream)\n    return res\n\n\nif __name__ == 
'__main__':\n    res = agem_pmnist()\n    print(res)\n"
  },
  {
    "path": "experiments/permuted_mnist/ewc.py",
    "content": "import avalanche as avl\nimport torch\nfrom torch.nn import CrossEntropyLoss\nfrom torch.optim import SGD\nfrom avalanche.evaluation import metrics as metrics\nfrom models import MLP\nfrom experiments.utils import set_seed, create_default_args\n\n\ndef ewc_pmnist(override_args=None):\n    \"\"\"\n    \"Overcoming catastrophic forgetting in neural networks\" by Kirkpatrick et. al. (2017).\n    https://www.pnas.org/content/114/13/3521\n\n    Results are below the original paper, which scores around 94%.\n    \"\"\"\n    args = create_default_args({'cuda': 0, 'ewc_lambda': 1, 'hidden_size': 512,\n                                'hidden_layers': 1, 'epochs': 10, 'dropout': 0,\n                                'ewc_mode': 'separate', 'ewc_decay': None,\n                                'learning_rate': 0.001, 'train_mb_size': 256,\n                                'seed': None}, override_args)\n    set_seed(args.seed)\n    device = torch.device(f\"cuda:{args.cuda}\"\n                          if torch.cuda.is_available() and\n                          args.cuda >= 0 else \"cpu\")\n\n    benchmark = avl.benchmarks.PermutedMNIST(10)\n    model = MLP(hidden_size=args.hidden_size, hidden_layers=args.hidden_layers,\n                drop_rate=args.dropout)\n    criterion = CrossEntropyLoss()\n\n    interactive_logger = avl.logging.InteractiveLogger()\n\n    evaluation_plugin = avl.training.plugins.EvaluationPlugin(\n        metrics.accuracy_metrics(epoch=True, experience=True, stream=True),\n        loggers=[interactive_logger])\n\n    cl_strategy = avl.training.EWC(\n        model, SGD(model.parameters(), lr=args.learning_rate), criterion,\n        ewc_lambda=args.ewc_lambda, mode=args.ewc_mode, decay_factor=args.ewc_decay,\n        train_mb_size=args.train_mb_size, train_epochs=args.epochs, eval_mb_size=128,\n        device=device, evaluator=evaluation_plugin)\n\n    res = None\n    for experience in benchmark.train_stream:\n        
cl_strategy.train(experience)\n        res = cl_strategy.eval(benchmark.test_stream)\n\n    return res\n\n\nif __name__ == '__main__':\n    res = ewc_pmnist()\n    print(res)\n"
  },
  {
    "path": "experiments/permuted_mnist/gem.py",
    "content": "import avalanche as avl\nimport torch\nfrom torch.nn import CrossEntropyLoss\nfrom torch.optim import SGD\nfrom avalanche.evaluation import metrics as metrics\nfrom models import MLP\nfrom experiments.utils import set_seed, create_default_args\n\n\ndef gem_pmnist(override_args=None):\n    \"\"\"\n    \"Gradient Episodic Memory for Continual Learning\" by Lopez-paz et. al. (2017).\n    https://proceedings.neurips.cc/paper/2017/hash/f87522788a2be2d171666752f97ddebb-Abstract.html\n    \"\"\"\n    args = create_default_args({'cuda': 0, 'patterns_per_exp': 1000, 'hidden_size': 100,\n                            'hidden_layers': 2, 'epochs': 1, 'dropout': 0,\n                            'mem_strength': 0.5, 'n_exp': 17,\n                            'learning_rate': 0.1, 'train_mb_size': 10, 'seed': None}, override_args)\n    set_seed(args.seed)\n    device = torch.device(f\"cuda:{args.cuda}\"\n                          if torch.cuda.is_available() and\n                          args.cuda >= 0 else \"cpu\")\n\n    benchmark = avl.benchmarks.PermutedMNIST(args.n_exp)\n    model = MLP(hidden_size=args.hidden_size, hidden_layers=args.hidden_layers,\n                drop_rate=args.dropout)\n    criterion = CrossEntropyLoss()\n\n    interactive_logger = avl.logging.InteractiveLogger()\n\n    evaluation_plugin = avl.training.plugins.EvaluationPlugin(\n        metrics.accuracy_metrics(epoch=True, experience=True, stream=True),\n        loggers=[interactive_logger])\n\n    cl_strategy = avl.training.GEM(\n        model, SGD(model.parameters(), lr=args.learning_rate), criterion,\n        patterns_per_exp=args.patterns_per_exp, memory_strength=args.mem_strength,\n        train_mb_size=args.train_mb_size, train_epochs=args.epochs, eval_mb_size=128,\n        device=device, evaluator=evaluation_plugin)\n\n    res = None\n    for experience in benchmark.train_stream:\n        cl_strategy.train(experience)\n        res = cl_strategy.eval(benchmark.test_stream)\n\n    
return res\n\n\nif __name__ == '__main__':\n    res = gem_pmnist()\n    print(res)\n"
  },
  {
    "path": "experiments/permuted_mnist/lfl.py",
    "content": "import avalanche as avl\nimport torch\nfrom avalanche.evaluation import metrics as metrics\nfrom models import MLP\nfrom experiments.utils import set_seed, create_default_args\n\n\ndef lfl_pmnist(override_args=None):\n    \"\"\"\n    \"Less-forgetting Learning in Deep Neural Networks\"\n    Heechul Jung, Jeongwoo Ju, Minju Jung and Junmo Kim;\n    arXiv, 2016, https://arxiv.org/pdf/1607.00122.pdf\n\n    The Permuted MNIST benchmark was not used in the original paper.\n    We run LFL on Permuted MNIST for proper comparison with other strategies,\n    since the benchmarks used in the original papers are not commonly used in\n    Continual Learning.\n    \"\"\"\n    args = create_default_args({'cuda': 0, 'lambda_e': [0.0001], 'epochs': 3,\n                                'hidden_size': 256, 'hidden_layers': 1,\n                                'learning_rate': 0.01, 'train_mb_size': 128, 'seed': None}, override_args)\n    set_seed(args.seed)\n    device = torch.device(f\"cuda:{args.cuda}\"\n                          if torch.cuda.is_available() and\n                          args.cuda >= 0 else \"cpu\")\n\n    if not isinstance(args.lambda_e, (list, tuple)):\n        raise ValueError(\"lambda_e parameter should be a list of floating numbers. 
\" \n                         \"Provide list with one element to apply the same lambda_e \" \n                         \"to all experiences.\")\n\n    benchmark = avl.benchmarks.PermutedMNIST(4)\n    model = MLP(hidden_size=args.hidden_size, hidden_layers=args.hidden_layers)\n    optimizer = torch.optim.SGD(model.parameters(), lr=args.learning_rate)\n    criterion = torch.nn.CrossEntropyLoss()\n\n    interactive_logger = avl.logging.InteractiveLogger()\n    eval_plugin = avl.training.plugins.EvaluationPlugin(\n        avl.evaluation.metrics.accuracy_metrics(minibatch=True, epoch=True, experience=True, stream=True),\n        avl.evaluation.metrics.loss_metrics(minibatch=True, epoch=True, experience=True, stream=True),\n        avl.evaluation.metrics.forgetting_metrics(experience=True),\n        loggers=[interactive_logger]\n    )\n\n    lambda_e = args.lambda_e[0] if len(args.lambda_e) == 1 else args.lambda_e\n\n    strategy = avl.training.LFL(\n        model,\n        optimizer,\n        criterion,\n        lambda_e=lambda_e,\n        train_epochs=args.epochs,\n        device=device,\n        train_mb_size=args.train_mb_size,\n        eval_mb_size=256,\n        evaluator=eval_plugin\n    )\n\n    res = None\n    for experience in benchmark.train_stream:\n        strategy.train(experience)\n        res = strategy.eval(benchmark.test_stream)\n\n    return res\n\n\nif __name__ == '__main__':\n    res = lfl_pmnist()\n    print(res)\n"
  },
  {
    "path": "experiments/permuted_mnist/mir.py",
    "content": "import numpy as np\nimport torch\nfrom torch.optim import SGD\n\nfrom avalanche.benchmarks.classic import PermutedMNIST\nfrom avalanche.benchmarks import benchmark_with_validation_stream\nfrom avalanche.benchmarks.scenarios import split_online_stream\nfrom avalanche.evaluation.metrics import accuracy_metrics, loss_metrics\nfrom avalanche.logging import InteractiveLogger\nfrom avalanche.models import SimpleMLP\nfrom avalanche.training.plugins import EvaluationPlugin, MIRPlugin, ReplayPlugin\nfrom avalanche.training.supervised import Naive\nfrom experiments.utils import create_default_args, set_seed, restrict_dataset_size\n\n\ndef mir_pmnist(override_args=None):\n    args = create_default_args(\n        {\n            \"cuda\": 0,\n            \"mem_size\": 500,\n            \"lr\": 0.05,\n            \"train_mb_size\": 10,\n            \"seed\": None,\n            \"subsample\": 50,\n            \"batch_size_mem\": 10,\n            \"dataset_size\": 1000,\n        },\n        override_args\n    )\n    set_seed(args.seed)\n    device = torch.device(\n        f\"cuda:{args.cuda}\" if torch.cuda.is_available() and args.cuda >= 0 else \"cpu\"\n    )\n    scenario = PermutedMNIST(\n        10,\n        return_task_id=False,\n        seed=0,\n        train_transform=None,\n        eval_transform=None,\n    )\n    scenario = benchmark_with_validation_stream(scenario, 0.05)\n    scenario = restrict_dataset_size(scenario, args.dataset_size)\n    model = SimpleMLP(10, hidden_size=400, hidden_layers=1)\n    optimizer = SGD(model.parameters(), lr=args.lr)\n    interactive_logger = InteractiveLogger()\n    loggers = [interactive_logger]\n    training_metrics = []\n    evaluation_metrics = [\n        accuracy_metrics(epoch=True, stream=True),\n        loss_metrics(epoch=True, stream=True),\n    ]\n    evaluator = EvaluationPlugin(\n        *training_metrics,\n        *evaluation_metrics,\n        loggers=loggers,\n    )\n    plugins = [\n        MIRPlugin(\n       
     mem_size=args.mem_size, subsample=args.subsample, batch_size_mem=args.batch_size_mem\n        )\n    ]\n    cl_strategy = Naive(\n        model=model,\n        optimizer=optimizer,\n        plugins=plugins,\n        evaluator=evaluator,\n        device=device,\n        train_mb_size=args.train_mb_size,\n        eval_mb_size=64,\n    )\n\n    ocl_scenario = split_online_stream(\n        original_stream=scenario.train_stream,\n        experience_size=10,\n        access_task_boundaries=False,\n    )\n\n    for t, experience in enumerate(ocl_scenario):\n        cl_strategy.train(\n            experience,\n            eval_streams=[],\n            num_workers=0,\n            drop_last=True,\n        )\n    results = cl_strategy.eval(scenario.test_stream)\n    return results\n\n\nif __name__ == \"__main__\":\n    res = mir_pmnist()\n    print(res)\n"
  },
  {
    "path": "experiments/permuted_mnist/naive.py",
    "content": "import avalanche as avl\nimport torch\nfrom torch.nn import CrossEntropyLoss\nfrom torch.optim import Adam\nfrom avalanche.evaluation import metrics as metrics\nfrom models import MLP\nfrom experiments.utils import set_seed, create_default_args\n\n\ndef naive_pmnist(override_args=None):\n    \"\"\"\n    \"Continual Learning Through Synaptic Intelligence\" by Zenke et. al. (2017).\n    http://proceedings.mlr.press/v70/zenke17a.html\n    \"\"\"\n    args = create_default_args({'cuda': 0, 'epochs': 20,\n                                'learning_rate': 0.001, 'train_mb_size': 256, 'seed': None}, override_args)\n    set_seed(args.seed)\n    device = torch.device(f\"cuda:{args.cuda}\"\n                          if torch.cuda.is_available() and\n                          args.cuda >= 0 else \"cpu\")\n\n    benchmark = avl.benchmarks.PermutedMNIST(10)\n    model = MLP(hidden_size=2000, hidden_layers=2, relu_act=True)\n    criterion = CrossEntropyLoss()\n\n    interactive_logger = avl.logging.InteractiveLogger()\n\n    evaluation_plugin = avl.training.plugins.EvaluationPlugin(\n        metrics.accuracy_metrics(epoch=True, experience=True, stream=True),\n        loggers=[interactive_logger])\n\n    cl_strategy = avl.training.Naive(\n        model, Adam(model.parameters(), lr=args.learning_rate), criterion,\n        train_mb_size=args.train_mb_size, train_epochs=args.epochs, eval_mb_size=128,\n        device=device, evaluator=evaluation_plugin)\n\n    res = None\n    for experience in benchmark.train_stream:\n        cl_strategy.train(experience)\n        res = cl_strategy.eval(benchmark.test_stream)\n\n    return res\n\n\nif __name__ == '__main__':\n    res = naive_pmnist()\n    print(res)\n"
  },
  {
    "path": "experiments/permuted_mnist/synaptic_intelligence.py",
    "content": "import avalanche as avl\nimport torch\nfrom torch.nn import CrossEntropyLoss\nfrom torch.optim import Adam\nfrom avalanche.evaluation import metrics as metrics\nfrom models import MLP\nfrom experiments.utils import set_seed, create_default_args\n\n\ndef synaptic_intelligence_pmnist(override_args=None):\n    \"\"\"\n    \"Continual Learning Through Synaptic Intelligence\" by Zenke et. al. (2017).\n    http://proceedings.mlr.press/v70/zenke17a.html\n\n    Results are below the original paper, which has a score around 97%\n    \"\"\"\n    args = create_default_args({'cuda': 0, 'si_lambda': 10, 'si_eps': 0.1, 'epochs': 10,\n                                'learning_rate': 0.001, 'train_mb_size': 256, 'seed': None}, override_args)\n    set_seed(args.seed)\n    device = torch.device(f\"cuda:{args.cuda}\"\n                          if torch.cuda.is_available() and\n                          args.cuda >= 0 else \"cpu\")\n\n    benchmark = avl.benchmarks.PermutedMNIST(10)\n    model = MLP(hidden_size=1000, hidden_layers=1, relu_act=True)\n    criterion = CrossEntropyLoss()\n\n    interactive_logger = avl.logging.InteractiveLogger()\n\n    evaluation_plugin = avl.training.plugins.EvaluationPlugin(\n        metrics.accuracy_metrics(epoch=True, experience=True, stream=True),\n        loggers=[interactive_logger])\n\n    cl_strategy = avl.training.SynapticIntelligence(\n        model, Adam(model.parameters(), lr=args.learning_rate), criterion,\n        si_lambda=args.si_lambda, eps=args.si_eps,\n        train_mb_size=args.train_mb_size, train_epochs=args.epochs, eval_mb_size=128,\n        device=device, evaluator=evaluation_plugin)\n\n    res = None\n    for experience in benchmark.train_stream:\n        cl_strategy.train(experience)\n        res = cl_strategy.eval(benchmark.test_stream)\n\n    return res\n\n\nif __name__ == '__main__':\n    res = synaptic_intelligence_pmnist()\n    print(res)\n"
  },
  {
    "path": "experiments/split_cifar10/__init__.py",
    "content": "from .online_replay import online_replay_scifar10\nfrom .mir import mir_scifar10\nfrom .er_ace import erace_scifar10\nfrom .er_aml import eraml_scifar10\nfrom .supervised_contrastive_replay import online_scr_scifar10\n"
  },
  {
    "path": "experiments/split_cifar10/er_ace.py",
    "content": "#!/usr/bin/env python3\nimport numpy as np\nimport torch\nimport torchvision.transforms as transforms\nfrom torch.optim import SGD\n\nfrom avalanche.benchmarks.classic import SplitCIFAR10\nfrom avalanche.evaluation.metrics import accuracy_metrics, loss_metrics\nfrom avalanche.logging import InteractiveLogger\nfrom avalanche.models import SlimResNet18\nfrom avalanche.models.dynamic_modules import IncrementalClassifier\nfrom avalanche.training.plugins import EvaluationPlugin\nfrom avalanche.training.supervised import Naive, ER_ACE\nfrom experiments.utils import create_default_args, set_seed\n\n\ndef erace_scifar10(override_args=None):\n    args = create_default_args(\n        {\n            \"cuda\": 0,\n            \"mem_size\": 1000,\n            \"lr\": 0.1,\n            \"train_mb_size\": 10,\n            \"seed\": None,\n            \"batch_size_mem\": 10,\n        },\n        override_args\n    )\n    set_seed(args.seed)\n    fixed_class_order = np.arange(10)\n    device = torch.device(\n        f\"cuda:{args.cuda}\" if torch.cuda.is_available() and args.cuda >= 0 else \"cpu\"\n    )\n\n    unique_transform = transforms.Compose(\n        [\n            transforms.ToTensor(),\n            transforms.Normalize(\n                (0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)\n            ),\n        ]\n    )\n    \n    scenario = SplitCIFAR10(\n        5,\n        return_task_id=False,\n        seed=0,\n        fixed_class_order=fixed_class_order,\n        shuffle=True,\n        class_ids_from_zero_in_each_exp=False,\n        train_transform=unique_transform,\n        eval_transform=unique_transform,\n    )\n\n    input_size = (3, 32, 32)\n    model = SlimResNet18(1)\n    model.linear = IncrementalClassifier(model.linear.in_features, 1)\n    optimizer = SGD(model.parameters(), lr=args.lr)\n    interactive_logger = InteractiveLogger()\n    loggers = [interactive_logger]\n    training_metrics = []\n    evaluation_metrics = [\n        
accuracy_metrics(epoch=True, stream=True),\n        loss_metrics(epoch=True, stream=True),\n    ]\n    evaluator = EvaluationPlugin(\n        *training_metrics,\n        *evaluation_metrics,\n        loggers=loggers,\n    )\n    plugins = []\n    cl_strategy = ER_ACE(\n        model=model,\n        optimizer=optimizer,\n        plugins=plugins,\n        evaluator=evaluator,\n        device=device,\n        train_mb_size=args.train_mb_size,\n        eval_mb_size=64,\n        mem_size=args.mem_size,\n        batch_size_mem=args.batch_size_mem,\n    )\n    for t, experience in enumerate(scenario.train_stream):\n        cl_strategy.train(\n            experience,\n            num_workers=0,\n            drop_last=True,\n        )\n        cl_strategy.eval(scenario.test_stream[: t + 1])\n    results = cl_strategy.eval(scenario.test_stream)\n    return results\n\n\nif __name__ == \"__main__\":\n    res = erace_scifar10()\n    print(res)\n"
  },
  {
    "path": "experiments/split_cifar10/er_aml.py",
    "content": "#!/usr/bin/env python3\nimport numpy as np\nimport torch\nimport torchvision.transforms as transforms\nfrom torch.optim import SGD\n\nfrom avalanche.benchmarks.classic import SplitCIFAR10\nfrom avalanche.evaluation.metrics import accuracy_metrics, loss_metrics\nfrom avalanche.logging import InteractiveLogger\nfrom avalanche.training.plugins import EvaluationPlugin\nfrom avalanche.training.supervised import ER_AML\nfrom experiments.utils import create_default_args, set_seed\nfrom models import SingleHeadReducedResNet18\n\n\ndef eraml_scifar10(override_args=None):\n    \"\"\"\n    Reproducing ER-AML experiments from paper\n    \"New insights on Reducing Abrupt Representation Change in Online Continual Learning\"\n    by Lucas Caccia et. al\n    https://openreview.net/forum?id=N8MaByOzUfb\n    \"\"\"\n    args = create_default_args(\n        {\n            \"cuda\": 0,\n            \"mem_size\": 1000,\n            \"lr\": 0.1,\n            \"temp\": 0.1,\n            \"train_mb_size\": 10,\n            \"seed\": None,\n            \"batch_size_mem\": 10,\n        },\n        override_args,\n    )\n    set_seed(args.seed)\n    fixed_class_order = np.arange(10)\n    device = torch.device(\n        f\"cuda:{args.cuda}\" if torch.cuda.is_available() and args.cuda >= 0 else \"cpu\"\n    )\n\n    unique_transform = transforms.Compose(\n        [\n            transforms.ToTensor(),\n            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n        ]\n    )\n\n    scenario = SplitCIFAR10(\n        5,\n        return_task_id=False,\n        seed=0,\n        fixed_class_order=fixed_class_order,\n        shuffle=True,\n        class_ids_from_zero_in_each_exp=False,\n        train_transform=unique_transform,\n        eval_transform=unique_transform,\n    )\n\n    input_size = (3, 32, 32)\n    model = SingleHeadReducedResNet18(10)\n    optimizer = SGD(model.parameters(), lr=args.lr)\n    interactive_logger = InteractiveLogger()\n    
loggers = [interactive_logger]\n    training_metrics = []\n    evaluation_metrics = [\n        accuracy_metrics(epoch=True, stream=True),\n        loss_metrics(epoch=True, stream=True),\n    ]\n    evaluator = EvaluationPlugin(\n        *training_metrics,\n        *evaluation_metrics,\n        loggers=loggers,\n    )\n    plugins = []\n    cl_strategy = ER_AML(\n        model=model,\n        feature_extractor=model.feature_extractor,\n        optimizer=optimizer,\n        plugins=plugins,\n        evaluator=evaluator,\n        device=device,\n        train_mb_size=args.train_mb_size,\n        eval_mb_size=64,\n        mem_size=args.mem_size,\n        batch_size_mem=args.batch_size_mem,\n    )\n    for t, experience in enumerate(scenario.train_stream):\n        cl_strategy.train(\n            experience,\n            num_workers=0,\n            drop_last=True,\n        )\n        cl_strategy.eval(scenario.test_stream[: t + 1])\n    results = cl_strategy.eval(scenario.test_stream)\n    return results\n\n\nif __name__ == \"__main__\":\n    res = eraml_scifar10()\n    print(res)\n"
  },
  {
    "path": "experiments/split_cifar10/mir.py",
    "content": "import numpy as np\nimport torch\nimport torchvision.transforms as transforms\nfrom torch.optim import SGD\n\nfrom avalanche.benchmarks.classic import SplitCIFAR10\nfrom avalanche.benchmarks import benchmark_with_validation_stream\nfrom avalanche.benchmarks.scenarios import split_online_stream\nfrom avalanche.evaluation.metrics import accuracy_metrics, loss_metrics\nfrom avalanche.logging import InteractiveLogger\nfrom avalanche.models import SlimResNet18\nfrom avalanche.training.plugins import EvaluationPlugin, MIRPlugin\nfrom avalanche.training.supervised import Naive, Naive\nfrom experiments.utils import create_default_args, set_seed\n\n\ndef mir_scifar10(override_args=None):\n    args = create_default_args(\n        {\n            \"cuda\": 0,\n            \"mem_size\": 1000,\n            \"lr\": 0.05,\n            \"train_mb_size\": 10,\n            \"seed\": None,\n            \"subsample\": 50,\n            \"batch_size_mem\": 10,\n        },\n        override_args\n    )\n    set_seed(args.seed)\n    fixed_class_order = np.arange(10)\n    device = torch.device(\n        f\"cuda:{args.cuda}\" if torch.cuda.is_available() and args.cuda >= 0 else \"cpu\"\n    )\n    scenario = SplitCIFAR10(\n        5,\n        return_task_id=False,\n        seed=0,\n        fixed_class_order=fixed_class_order,\n        train_transform=transforms.ToTensor(),\n        eval_transform=transforms.ToTensor(),\n        shuffle=True,\n        class_ids_from_zero_in_each_exp=False,\n    )\n    scenario = benchmark_with_validation_stream(scenario, 0.05)\n    model = SlimResNet18(10)\n    optimizer = SGD(model.parameters(), lr=args.lr)\n    interactive_logger = InteractiveLogger()\n    loggers = [interactive_logger]\n    training_metrics = []\n    evaluation_metrics = [\n        accuracy_metrics(epoch=True, stream=True),\n        loss_metrics(epoch=True, stream=True),\n    ]\n    evaluator = EvaluationPlugin(\n        *training_metrics,\n        *evaluation_metrics,\n    
    loggers=loggers,\n    )\n    plugins = [\n        MIRPlugin(\n            mem_size=args.mem_size, subsample=args.subsample, batch_size_mem=args.batch_size_mem\n        )\n    ]\n    cl_strategy = Naive(\n        model=model,\n        optimizer=optimizer,\n        plugins=plugins,\n        evaluator=evaluator,\n        device=device,\n        train_mb_size=args.train_mb_size,\n        eval_mb_size=64,\n    )\n\n    ocl_scenario = split_online_stream(\n        original_stream=scenario.train_stream,\n        experience_size=10,\n        access_task_boundaries=False,\n    )\n    for t, experience in enumerate(ocl_scenario):\n        cl_strategy.train(\n            experience,\n            eval_streams=[],\n            num_workers=0,\n            drop_last=True,\n        )\n\n    results = cl_strategy.eval(scenario.test_stream)\n    return results\n\n\nif __name__ == \"__main__\":\n    res = mir_scifar10()\n    print(res)\n"
  },
  {
    "path": "experiments/split_cifar10/online_er_ace.py",
    "content": "#!/usr/bin/env python3\nimport numpy as np\nimport torch\nimport torchvision.transforms as transforms\nfrom torch.optim import SGD\n\nfrom avalanche.benchmarks.classic import SplitCIFAR10\nfrom avalanche.benchmarks.scenarios import split_online_stream\nfrom avalanche.evaluation.metrics import accuracy_metrics, loss_metrics\nfrom avalanche.logging import InteractiveLogger\nfrom avalanche.models import SlimResNet18\nfrom avalanche.models.dynamic_modules import IncrementalClassifier\nfrom avalanche.training.plugins import EvaluationPlugin\nfrom avalanche.training.supervised import Naive, OnlineER_ACE\nfrom experiments.utils import create_default_args, set_seed\n\n\ndef eracl_scifar10(override_args=None):\n    args = create_default_args(\n        {\n            \"cuda\": 0,\n            \"mem_size\": 1000,\n            \"lr\": 0.1,\n            \"train_mb_size\": 10,\n            \"seed\": None,\n            \"batch_size_mem\": 10,\n        },\n        override_args\n    )\n    set_seed(args.seed)\n    fixed_class_order = np.arange(10)\n    device = torch.device(\n        f\"cuda:{args.cuda}\" if torch.cuda.is_available() and args.cuda >= 0 else \"cpu\"\n    )\n\n    scenario = SplitCIFAR10(\n        5,\n        return_task_id=False,\n        seed=args.seed,\n        fixed_class_order=fixed_class_order,\n        shuffle=True,\n        class_ids_from_zero_in_each_exp=False,\n    )\n\n    model = SlimResNet18(1)\n    model.linear = IncrementalClassifier(model.linear.in_features, 1)\n    optimizer = SGD(model.parameters(), lr=args.lr)\n\n    interactive_logger = InteractiveLogger()\n\n    loggers = [interactive_logger]\n\n    training_metrics = []\n\n    evaluation_metrics = [\n        accuracy_metrics(epoch=True, stream=True),\n        loss_metrics(epoch=True, stream=True),\n    ]\n\n    # Create main evaluator that will be used by the training actor\n    evaluator = EvaluationPlugin(\n        *training_metrics,\n        *evaluation_metrics,\n        
loggers=loggers,\n    )\n\n    plugins = []\n\n    #######################\n    #  Strategy Creation  #\n    #######################\n\n    cl_strategy = OnlineER_ACE(\n        model=model,\n        optimizer=optimizer,\n        plugins=plugins,\n        evaluator=evaluator,\n        device=device,\n        train_mb_size=args.train_mb_size,\n        eval_mb_size=64,\n        mem_size=args.mem_size,\n        batch_size_mem=args.batch_size_mem,\n    )\n\n    ###################\n    #  TRAINING LOOP  #\n    ###################\n\n    print(\"Starting experiment...\")\n\n    print([p.__class__.__name__ for p in cl_strategy.plugins])\n\n    ocl_scenario = split_online_stream(\n        original_stream=scenario.train_stream,\n        experience_size=10,\n        access_task_boundaries=False,\n    )\n    for t, experience in enumerate(ocl_scenario):\n        cl_strategy.train(\n            experience,\n            eval_streams=[],\n            num_workers=0,\n            drop_last=True,\n        )\n\n    results = cl_strategy.eval(scenario.test_stream)\n\n    return results\n\n\nif __name__ == \"__main__\":\n    res = eracl_scifar10()\n    print(res)\n"
  },
  {
    "path": "experiments/split_cifar10/online_replay.py",
    "content": "import numpy as np\nimport torch\nfrom torch.optim import SGD\n\nfrom avalanche.benchmarks.classic import SplitCIFAR10\nfrom avalanche.benchmarks import benchmark_with_validation_stream\nfrom avalanche.benchmarks.scenarios import split_online_stream\nfrom avalanche.evaluation.metrics import accuracy_metrics, loss_metrics\nfrom avalanche.logging import InteractiveLogger\nfrom avalanche.models import SlimResNet18\nfrom avalanche.training.plugins import EvaluationPlugin, ReplayPlugin\nfrom avalanche.training.storage_policy import ClassBalancedBuffer\nfrom avalanche.training.supervised import Naive\nfrom experiments.utils import create_default_args, set_seed\n\n\ndef online_replay_scifar10(override_args=None):\n    args = create_default_args(\n        {\n            \"cuda\": 0,\n            \"mem_size\": 1000,\n            \"lr\": 0.1,\n            \"train_mb_size\": 10,\n            \"seed\": None,\n            \"batch_size_mem\": 10,\n        },\n        override_args\n    )\n    set_seed(args.seed)\n    fixed_class_order = np.arange(10)\n    device = torch.device(\n        f\"cuda:{args.cuda}\" if torch.cuda.is_available() and args.cuda >= 0 else \"cpu\"\n    )\n\n    scenario = SplitCIFAR10(\n        5,\n        return_task_id=False,\n        seed=args.seed,\n        fixed_class_order=fixed_class_order,\n        shuffle=True,\n        class_ids_from_zero_in_each_exp=False,\n    )\n\n    scenario = benchmark_with_validation_stream(scenario, 0.05)\n    model = SlimResNet18(10)\n    optimizer = SGD(model.parameters(), lr=args.lr)\n\n    interactive_logger = InteractiveLogger()\n\n    loggers = [interactive_logger]\n\n    training_metrics = []\n\n    evaluation_metrics = [\n        accuracy_metrics(epoch=True, stream=True),\n        loss_metrics(epoch=True, stream=True),\n    ]\n\n    evaluator = EvaluationPlugin(\n        *training_metrics,\n        *evaluation_metrics,\n        loggers=loggers,\n    )\n    \n    storage_policy = 
ClassBalancedBuffer(args.mem_size, adaptive_size=True)\n    plugins = [ReplayPlugin(args.mem_size, storage_policy=storage_policy)]\n\n\n    cl_strategy = Naive(\n        model=model,\n        optimizer=optimizer,\n        plugins=plugins,\n        evaluator=evaluator,\n        device=device,\n        train_mb_size=args.train_mb_size,\n        eval_mb_size=64,\n    )\n\n    ocl_scenario = split_online_stream(\n        original_stream=scenario.train_stream,\n        experience_size=args.train_mb_size,\n        access_task_boundaries=False,\n    )\n\n    for t, experience in enumerate(ocl_scenario):\n        cl_strategy.train(\n            experience,\n            eval_streams=[],\n            num_workers=0,\n            drop_last=True,\n        )\n    results = cl_strategy.eval(scenario.test_stream)\n\n    return results\n\n\nif __name__ == \"__main__\":\n    res = online_replay_scifar10()\n    print(res)\n"
  },
  {
    "path": "experiments/split_cifar10/supervised_contrastive_replay.py",
    "content": "from avalanche.training import SCR\nimport torch\nimport torchvision.transforms as transforms\nfrom torch.optim import SGD\n\nfrom avalanche.benchmarks.classic import SplitCIFAR10\nfrom avalanche.evaluation.metrics import accuracy_metrics, loss_metrics\nfrom avalanche.logging import InteractiveLogger\nfrom avalanche.models import SlimResNet18, SCRModel\nfrom avalanche.training.plugins import EvaluationPlugin\nimport kornia.augmentation as K\n\nfrom experiments.utils import create_default_args, set_seed\nfrom avalanche.benchmarks.scenarios import split_online_stream\nfrom torch.utils.data import DataLoader\nfrom avalanche.training.losses import SCRLoss\n\n\ndef online_scr_scifar10(override_args=None):\n    \"\"\"\n    Reproducing Supervised Contrastive Replay paper\n    \"Supervised Contrastive Replay: Revisiting the Nearest Class Mean Classifier\n    in Online Class-Incremental Continual Learning\" by Mai et. al. (2021).\n    https://arxiv.org/abs/2103.13885\n\n    In the original paper, SCR uses the ReviewTrick\n    technique (fine-tuning on the buffer at the end of training on each experience).\n    For fairness of comparison with the other strategies, we do not employ\n    the review trick, therefore our results\n    are lower wrt the original paper. 
However, you can activate the review trick\n    by setting the corresponding parameter to True in the args.\n    \"\"\"\n    args = create_default_args(\n        {\n            \"cuda\": 0,\n            \"mem_size\": 200,\n            \"lr\": 0.1,\n            \"train_mb_size\": 10,\n            \"seed\": None,\n            \"batch_size_mem\": 100,\n            \"review_trick\": False\n        },\n        override_args\n    )\n\n    set_seed(args.seed)\n\n    device = torch.device(\n        f\"cuda:{args.cuda}\" if torch.cuda.is_available() and args.cuda >= 0 else \"cpu\"\n    )\n\n    data_transform = transforms.Compose([\n        transforms.ToTensor(),\n    ])\n    scenario = SplitCIFAR10(\n        5,\n        return_task_id=False,\n        train_transform=data_transform,\n        eval_transform=data_transform,\n        shuffle=True,\n        class_ids_from_zero_in_each_exp=False,\n    )\n\n    # SlimResNet18 is used as encoder\n    # the projection network takes as input the output of the ResNet\n    nf = 20\n    encoding_network = SlimResNet18(nclasses=10, nf=nf)\n    encoding_network.linear = torch.nn.Identity()\n    projection_network = torch.nn.Sequential(\n        torch.nn.Linear(nf*8, nf*8), torch.nn.ReLU(inplace=True), torch.nn.Linear(nf*8, 128))\n\n    # a NCM Classifier is used at eval time\n    model = SCRModel(\n        feature_extractor=encoding_network,\n        projection=projection_network)\n\n    optimizer = SGD(model.parameters(), lr=args.lr)\n\n    interactive_logger = InteractiveLogger()\n    loggers = [interactive_logger]\n    training_metrics = []\n\n    # training accuracy cannot be directly monitored with SCR\n    # training loss and eval loss are two different ones\n    evaluation_metrics = [\n        accuracy_metrics(experience=True, stream=True),\n        loss_metrics(epoch=True, experience=True, stream=True),\n    ]\n    evaluator = EvaluationPlugin(\n        *training_metrics,\n        *evaluation_metrics,\n        loggers=loggers,\n  
  )\n\n    scr_transforms = torch.nn.Sequential(\n        K.RandomResizedCrop(size=(32, 32), scale=(0.2, 1.)),\n        K.RandomHorizontalFlip(),\n        K.ColorJitter(0.4, 0.4, 0.4, 0.1, p=0.8),\n        K.RandomGrayscale(p=0.2)\n    )\n    # should achieve around 48% final accuracy\n    cl_strategy = SCR(\n        model=model,\n        optimizer=optimizer,\n        augmentations=scr_transforms,\n        plugins=None,\n        evaluator=evaluator,\n        device=device,\n        train_mb_size=args.train_mb_size,\n        eval_mb_size=64,\n        mem_size=args.mem_size,\n        batch_size_mem=args.batch_size_mem\n    )\n\n    ocl_scenario = split_online_stream(\n        original_stream=scenario.train_stream,\n        experience_size=args.train_mb_size,\n        access_task_boundaries=False,\n    )\n\n    for t, experience in enumerate(ocl_scenario):\n        cl_strategy.train(experience)\n\n        if args.review_trick and experience.is_last_subexp:  # at the end of each macro experience\n            buffer = cl_strategy.replay_plugin.storage_policy.buffer\n            dl = DataLoader(buffer, batch_size=args.batch_size_mem, shuffle=True, drop_last=True)\n            model.train()\n            crit = SCRLoss(temperature=0.1)\n            for x, y, _ in dl:\n                assert x.size(0) % 2 == 0, f\"{x.size(0)}\"\n                x, y = x.to(device), y.to(device)\n                optimizer.zero_grad()\n                mb_x_augmented = scr_transforms(x)\n                x = torch.cat([x, mb_x_augmented], dim=0)\n                assert x.size(0) % 2 == 0, f\"{x.size(0)}\"\n\n                out = model(x)\n                assert out.size(0) % 2 == 0, f\"{x.size(0)}\"\n\n                original_batch_size = int(out.size(0) / 2)\n                original_examples = out[:original_batch_size]\n                augmented_examples = out[original_batch_size:]\n                out = torch.stack(\n                    [original_examples, augmented_examples],\n            
        dim=1)\n\n                loss = crit(out, y)\n                loss.backward()\n                params = [p for p in model.parameters() if p.requires_grad and p.grad is not None]\n                grad = [p.grad.clone()/10. for p in params]\n                for g, p in zip(grad, params):\n                    p.grad.data.copy_(g)\n                optimizer.step()\n                model.eval()\n                cl_strategy.compute_class_means()\n\n    if args.review_trick:\n        model.eval()\n        cl_strategy.compute_class_means()\n\n    results = cl_strategy.eval(scenario.test_stream)\n    return results\n\n\nif __name__ == '__main__':\n    res = online_scr_scifar10()\n    print(res)\n"
  },
  {
    "path": "experiments/split_cifar100/__init__.py",
    "content": "from .icarl import icarl_scifar100\nfrom .gem import gem_scifar100\nfrom .agem import agem_scifar100\nfrom .lamaml import lamaml_scifar100\nfrom .er_ace import erace_scifar100\nfrom .er_aml import eraml_scifar100\nfrom .online_replay import online_replay_scifar100\n"
  },
  {
    "path": "experiments/split_cifar100/agem.py",
    "content": "import avalanche as avl\nimport torch\nfrom torch.nn import CrossEntropyLoss\nfrom torch.optim import SGD, Adam\nfrom avalanche.evaluation import metrics as metrics\nfrom models import MultiHeadReducedResNet18\nfrom experiments.utils import set_seed, create_default_args\n\n\ndef agem_scifar100(override_args=None):\n    \"\"\"\n    \"Efficient Lifelong Learning with A-GEM\" by Chaudhry et. al. (2019).\n    https://openreview.net/pdf?id=Hkf2_sC5FX\n    \"\"\"\n    args = create_default_args({'cuda': 0, 'patterns_per_exp': 250, 'epochs': 1,\n                                'sample_size': 1300, 'learning_rate': 0.03, 'train_mb_size': 10,\n                                'seed': None}, override_args)\n    set_seed(args.seed)\n    device = torch.device(f\"cuda:{args.cuda}\"\n                          if torch.cuda.is_available() and\n                          args.cuda >= 0 else \"cpu\")\n\n    benchmark = avl.benchmarks.SplitCIFAR100(17, return_task_id=True, fixed_class_order=list(range(85)))\n    model = MultiHeadReducedResNet18()\n    criterion = CrossEntropyLoss()\n\n    interactive_logger = avl.logging.InteractiveLogger()\n\n    evaluation_plugin = avl.training.plugins.EvaluationPlugin(\n        metrics.accuracy_metrics(epoch=True, experience=True, stream=True),\n        loggers=[interactive_logger])\n\n    cl_strategy = avl.training.AGEM(\n        model, SGD(model.parameters(), lr=args.learning_rate), criterion,\n        patterns_per_exp=args.patterns_per_exp, sample_size=args.sample_size,\n        train_mb_size=args.train_mb_size, train_epochs=args.epochs, eval_mb_size=128,\n        device=device, evaluator=evaluation_plugin, plugins=[])\n\n    res = None\n    for experience in benchmark.train_stream:\n        cl_strategy.train(experience)\n        res = cl_strategy.eval(benchmark.test_stream)\n    return res\n\n\nif __name__ == '__main__':\n    res = agem_scifar100()\n    print(res)\n"
  },
  {
    "path": "experiments/split_cifar100/er_ace.py",
    "content": "#!/usr/bin/env python3\nimport numpy as np\nimport torch\nimport torchvision.transforms as transforms\nfrom torch.optim import SGD\nfrom torchvision.transforms import ToTensor\n\nfrom avalanche.benchmarks.classic import SplitCIFAR100\nfrom avalanche.evaluation.metrics import accuracy_metrics, loss_metrics\nfrom avalanche.logging import InteractiveLogger\nfrom avalanche.models import SlimResNet18\nfrom avalanche.models.dynamic_modules import IncrementalClassifier\nfrom avalanche.training.plugins import EvaluationPlugin\nfrom avalanche.training.supervised import ER_ACE\nfrom experiments.utils import create_default_args, set_seed\n\n\ndef erace_scifar100(override_args=None):\n    args = create_default_args(\n        {\n            \"cuda\": 0,\n            \"mem_size\": 10000,\n            \"lr\": 0.1,\n            \"train_mb_size\": 10,\n            \"seed\": None,\n            \"batch_size_mem\": 10,\n        },\n        override_args\n    )\n    set_seed(args.seed)\n    fixed_class_order = np.arange(100)\n    device = torch.device(\n        f\"cuda:{args.cuda}\" if torch.cuda.is_available() and args.cuda >= 0 else \"cpu\"\n    )\n    unique_transform = transforms.Compose(\n        [\n            ToTensor(),\n            transforms.Normalize(\n                (0.5071, 0.4866, 0.4409), (0.2009, 0.1984, 0.2023)\n            ),\n        ]\n    )\n    scenario = SplitCIFAR100(\n        20,\n        return_task_id=False,\n        seed=0,\n        fixed_class_order=fixed_class_order,\n        shuffle=True,\n        class_ids_from_zero_in_each_exp=False,\n        train_transform=unique_transform,\n        eval_transform=unique_transform,\n    )\n    input_size = (3, 32, 32)\n    model = SlimResNet18(1)\n    model.linear = IncrementalClassifier(model.linear.in_features, 1)\n    optimizer = SGD(model.parameters(), lr=args.lr)\n    interactive_logger = InteractiveLogger()\n    loggers = [interactive_logger]\n    training_metrics = []\n    evaluation_metrics = 
[\n        accuracy_metrics(epoch=True, stream=True),\n        loss_metrics(epoch=True, stream=True),\n    ]\n    evaluator = EvaluationPlugin(\n        *training_metrics,\n        *evaluation_metrics,\n        loggers=loggers,\n    )\n    plugins = []\n    cl_strategy = ER_ACE(\n        model=model,\n        optimizer=optimizer,\n        plugins=plugins,\n        evaluator=evaluator,\n        device=device,\n        train_mb_size=args.train_mb_size,\n        eval_mb_size=64,\n        mem_size=args.mem_size,\n        batch_size_mem=args.batch_size_mem,\n    )\n    for t, experience in enumerate(scenario.train_stream):\n        cl_strategy.train(\n            experience,\n            num_workers=0,\n            drop_last=True,\n        )\n        cl_strategy.eval(scenario.test_stream[: t + 1])\n    results = cl_strategy.eval(scenario.test_stream)\n    return results\n\n\nif __name__ == \"__main__\":\n    res = erace_scifar100()\n    print(res)\n"
  },
  {
    "path": "experiments/split_cifar100/er_aml.py",
    "content": "#!/usr/bin/env python3\nimport numpy as np\nimport torch\nimport torchvision.transforms as transforms\nfrom torch.optim import SGD\nfrom torchvision.transforms import ToTensor\n\nfrom avalanche.benchmarks.classic import SplitCIFAR100\nfrom avalanche.evaluation.metrics import accuracy_metrics, loss_metrics\nfrom avalanche.logging import InteractiveLogger\nfrom avalanche.training.plugins import EvaluationPlugin\nfrom avalanche.training.supervised import ER_AML\nfrom experiments.utils import create_default_args, set_seed\nfrom models import SingleHeadReducedResNet18\n\n\ndef eraml_scifar100(override_args=None):\n    \"\"\"\n    Reproducing ER-AML experiments from paper\n    \"New insights on Reducing Abrupt Representation Change in Online Continual Learning\"\n    by Lucas Caccia et. al\n    https://openreview.net/forum?id=N8MaByOzUfb\n    \"\"\"\n    args = create_default_args(\n        {\n            \"cuda\": 0,\n            \"mem_size\": 10000,\n            \"lr\": 0.1,\n            \"temp\": 0.1,\n            \"train_mb_size\": 10,\n            \"seed\": None,\n            \"batch_size_mem\": 10,\n        },\n        override_args,\n    )\n    set_seed(args.seed)\n    fixed_class_order = np.arange(100)\n    device = torch.device(\n        f\"cuda:{args.cuda}\" if torch.cuda.is_available() and args.cuda >= 0 else \"cpu\"\n    )\n    unique_transform = transforms.Compose(\n        [\n            ToTensor(),\n            transforms.Normalize((0.5071, 0.4866, 0.4409), (0.2009, 0.1984, 0.2023)),\n        ]\n    )\n    scenario = SplitCIFAR100(\n        20,\n        return_task_id=False,\n        seed=0,\n        fixed_class_order=fixed_class_order,\n        shuffle=True,\n        class_ids_from_zero_in_each_exp=False,\n        train_transform=unique_transform,\n        eval_transform=unique_transform,\n    )\n    input_size = (3, 32, 32)\n    model = SingleHeadReducedResNet18(100)\n    optimizer = SGD(model.parameters(), lr=args.lr)\n    
interactive_logger = InteractiveLogger()\n    loggers = [interactive_logger]\n    training_metrics = []\n    evaluation_metrics = [\n        accuracy_metrics(epoch=True, stream=True),\n        loss_metrics(epoch=True, stream=True),\n    ]\n    evaluator = EvaluationPlugin(\n        *training_metrics,\n        *evaluation_metrics,\n        loggers=loggers,\n    )\n    plugins = []\n    cl_strategy = ER_AML(\n        model=model,\n        feature_extractor=model.feature_extractor,\n        optimizer=optimizer,\n        plugins=plugins,\n        evaluator=evaluator,\n        device=device,\n        train_mb_size=args.train_mb_size,\n        eval_mb_size=64,\n        mem_size=args.mem_size,\n        batch_size_mem=args.batch_size_mem,\n    )\n    for t, experience in enumerate(scenario.train_stream):\n        cl_strategy.train(\n            experience,\n            num_workers=0,\n            drop_last=True,\n        )\n        cl_strategy.eval(scenario.test_stream[: t + 1])\n    results = cl_strategy.eval(scenario.test_stream)\n    return results\n\n\nif __name__ == \"__main__\":\n    res = eraml_scifar100()\n    print(res)\n"
  },
  {
    "path": "experiments/split_cifar100/gem.py",
    "content": "import avalanche as avl\nimport torch\nfrom torch.nn import CrossEntropyLoss\nfrom torch.optim import SGD\nfrom avalanche.evaluation import metrics as metrics\nfrom models import MultiHeadReducedResNet18\nfrom experiments.utils import set_seed, create_default_args\n\n\ndef gem_scifar100(override_args=None):\n    \"\"\"\n    \"Gradient Episodic Memory for Continual Learning\" by Lopez-paz et. al. (2017).\n    https://proceedings.neurips.cc/paper/2017/hash/f87522788a2be2d171666752f97ddebb-Abstract.html\n    \"\"\"\n    args = create_default_args({'cuda': 0, 'patterns_per_exp': 256, 'epochs': 1,\n                                'mem_strength': 0.5, 'learning_rate': 0.1, 'train_mb_size': 10,\n                                'seed': None}, override_args)\n    set_seed(args.seed)\n    device = torch.device(f\"cuda:{args.cuda}\"\n                          if torch.cuda.is_available() and\n                          args.cuda >= 0 else \"cpu\")\n\n    benchmark = avl.benchmarks.SplitCIFAR100(20, return_task_id=True)\n    model = MultiHeadReducedResNet18()\n    criterion = CrossEntropyLoss()\n\n    interactive_logger = avl.logging.InteractiveLogger()\n\n    evaluation_plugin = avl.training.plugins.EvaluationPlugin(\n        metrics.accuracy_metrics(epoch=True, experience=True, stream=True),\n        loggers=[interactive_logger])\n\n    cl_strategy = avl.training.GEM(\n        model, SGD(model.parameters(), lr=args.learning_rate, momentum=0.9), criterion,\n        patterns_per_exp=args.patterns_per_exp, memory_strength=args.mem_strength,\n        train_mb_size=args.train_mb_size, train_epochs=args.epochs, eval_mb_size=128,\n        device=device, evaluator=evaluation_plugin)\n\n    res = None\n    for experience in benchmark.train_stream:\n        cl_strategy.train(experience)\n        res = cl_strategy.eval(benchmark.test_stream)\n\n    return res\n\n\nif __name__ == '__main__':\n    res = gem_scifar100()\n    print(res)\n"
  },
  {
    "path": "experiments/split_cifar100/icarl.py",
    "content": "import numpy as np\nimport torch\nfrom torch.optim.lr_scheduler import MultiStepLR\nfrom torch.optim import SGD\nfrom torchvision import transforms\n\nfrom experiments.utils import set_seed, create_default_args\n\nfrom avalanche.benchmarks import SplitCIFAR100\nfrom avalanche.models import IcarlNet, make_icarl_net, initialize_icarl_net\nfrom avalanche.training.plugins.lr_scheduling import LRSchedulerPlugin\nfrom avalanche.training.plugins import EvaluationPlugin\nfrom avalanche.evaluation.metrics import *\nfrom avalanche.logging.interactive_logging import InteractiveLogger\nfrom avalanche.training import ICaRL\n\n\ndef icarl_cifar100_augment_data(img):\n    img = img.numpy()\n    padded = np.pad(img, ((0, 0), (4, 4), (4, 4)), mode='constant')\n    random_cropped = np.zeros(img.shape, dtype=np.float32)\n    crop = np.random.randint(0, high=8 + 1, size=(2,))\n\n    # Cropping and possible flipping\n    if np.random.randint(2) > 0:\n        random_cropped[:, :, :] = \\\n            padded[:, crop[0]:(crop[0]+32), crop[1]:(crop[1]+32)]\n    else:\n        random_cropped[:, :, :] = \\\n            padded[:, crop[0]:(crop[0]+32), crop[1]:(crop[1]+32)][:, :, ::-1]\n    t = torch.tensor(random_cropped)\n    return t\n\n\ndef icarl_scifar100(override_args=None):\n    \"\"\"\n    \"iCaRL: Incremental Classifier and Representation Learning\",\n    Sylvestre-Alvise Rebuffi, Alexander Kolesnikov, Georg Sperl, Christoph H. Lampert;\n    Proceedings of the IEEE Conference on\n    Computer Vision and Pattern Recognition (CVPR), 2017, pp. 
2001-2010\n    https://openaccess.thecvf.com/content_cvpr_2017/html/Rebuffi_iCaRL_Incremental_Classifier_CVPR_2017_paper.html\n\n    The expected performance is 50%, while we achieve a lower score.\n    \"\"\"\n    args = create_default_args({'cuda': 0, 'batch_size': 128, 'nb_exp': 10,\n                                'memory_size': 2000, 'epochs': 70, 'lr_base': 2.,\n                                'lr_milestones': [49, 63], 'lr_factor': 5.,\n                                'wght_decay': 0.00001, 'seed': None}, override_args)\n\n    # class incremental learning: classes mutual exclusive\n    fixed_class_order = [87, 0, 52, 58, 44, 91, 68, 97, 51, 15,\n                         94, 92, 10, 72, 49, 78, 61, 14, 8, 86,\n                         84, 96, 18, 24, 32, 45, 88, 11, 4, 67,\n                         69, 66, 77, 47, 79, 93, 29, 50, 57, 83,\n                         17, 81, 41, 12, 37, 59, 25, 20, 80, 73,\n                         1, 28, 6, 46, 62, 82, 53, 9, 31, 75,\n                         38, 63, 33, 74, 27, 22, 36, 3, 16, 21,\n                         60, 19, 70, 90, 89, 43, 5, 42, 65, 76,\n                         40, 30, 23, 85, 2, 95, 56, 48, 71, 64,\n                         98, 13, 99, 7, 34, 55, 54, 26, 35, 39]\n    set_seed(args.seed)\n    device = torch.device(f\"cuda:{args.cuda}\"\n                          if torch.cuda.is_available() and\n                             args.cuda >= 0 else \"cpu\")\n\n    transform_prototypes = transforms.Compose([\n        icarl_cifar100_augment_data,\n    ])\n\n    train_trsf = transforms.Compose([transforms.ToTensor(),\n                                     icarl_cifar100_augment_data])\n    test_trsf = transforms.Compose([transforms.ToTensor()])\n\n    benchmark = SplitCIFAR100(\n        n_experiences=args.nb_exp, seed=args.seed,\n        fixed_class_order=fixed_class_order,\n        train_transform=train_trsf,\n        eval_transform=test_trsf\n    )\n\n    interactive_logger = InteractiveLogger()\n    
eval_plugin = EvaluationPlugin(\n        accuracy_metrics(experience=True, stream=True),\n        loggers=[interactive_logger])\n\n    # _____________________________Strategy\n    model: IcarlNet = make_icarl_net(num_classes=100)\n    model.apply(initialize_icarl_net)\n\n    optim = SGD(model.parameters(), lr=args.lr_base,\n                weight_decay=args.wght_decay, momentum=0.9)\n    sched = LRSchedulerPlugin(\n        MultiStepLR(optim, args.lr_milestones, gamma=1.0 / args.lr_factor))\n\n    strategy = ICaRL(\n        model.feature_extractor, model.classifier, optim,\n        args.memory_size,\n        buffer_transform=transform_prototypes,\n        fixed_memory=True, train_mb_size=args.batch_size,\n        train_epochs=args.epochs, eval_mb_size=args.batch_size,\n        plugins=[sched], device=device, evaluator=eval_plugin\n    )\n\n    for i, exp in enumerate(benchmark.train_stream):\n        strategy.train(exp, num_workers=4)\n        res = strategy.eval(benchmark.test_stream, num_workers=4)\n\n    return res\n\n\nif __name__ == '__main__':\n    res = icarl_scifar100()\n    print(res)\n"
  },
  {
    "path": "experiments/split_cifar100/lamaml.py",
    "content": "import avalanche as avl\nimport torch\nfrom torch.nn import CrossEntropyLoss\n\nfrom avalanche.evaluation import metrics as metrics\nfrom avalanche.training.storage_policy import ReservoirSamplingBuffer\nfrom avalanche.training.plugins import ReplayPlugin\nfrom avalanche.training.supervised.lamaml_v2 import LaMAML\n\nfrom models.models_lamaml import MTConvCIFAR\nfrom experiments.utils import set_seed, create_default_args\n\n\ndef lamaml_scifar100(override_args=None):\n    \"\"\"\n    \"La-MAML: Look-ahead Meta Learning for Continual Learning\",\n    Gunshi Gupta, Karmesh Yadav, Liam Paull;\n    NeurIPS, 2020\n    https://arxiv.org/abs/2007.13904\n    \"\"\"\n    # Args\n    args = create_default_args(\n        {'cuda': 0, 'n_inner_updates': 5, 'second_order': True,\n         'grad_clip_norm': 1.0, 'learn_lr': True, 'lr_alpha': 0.25,\n         'sync_update': False, 'mem_size': 200, 'lr': 0.1,\n         'train_mb_size': 10, 'train_epochs': 10, 'seed': None}, override_args\n    )\n\n    set_seed(args.seed)\n    device = torch.device(f\"cuda:{args.cuda}\"\n                          if torch.cuda.is_available() and\n                          args.cuda >= 0 else \"cpu\")\n    # Benchmark\n    benchmark = avl.benchmarks.SplitCIFAR100(n_experiences=20,\n                                             return_task_id=True)\n\n    # Loggers and metrics\n    interactive_logger = avl.logging.InteractiveLogger()\n\n    evaluation_plugin = avl.training.plugins.EvaluationPlugin(\n        metrics.accuracy_metrics(epoch=True, experience=True, stream=True),\n        loggers=[interactive_logger])\n\n    # Buffer\n    rs_buffer = ReservoirSamplingBuffer(max_size=args.mem_size)\n    replay_plugin = ReplayPlugin(\n        mem_size=args.mem_size,\n        batch_size=args.train_mb_size,\n        batch_size_mem=args.train_mb_size,\n        task_balanced_dataloader=False,\n        storage_policy=rs_buffer\n    )\n\n    # Strategy\n    model = MTConvCIFAR()\n    cl_strategy = 
LaMAML(\n        model,\n        torch.optim.SGD(model.parameters(), lr=args.lr),\n        CrossEntropyLoss(),\n        n_inner_updates=args.n_inner_updates,\n        second_order=args.second_order,\n        grad_clip_norm=args.grad_clip_norm,\n        learn_lr=args.learn_lr,\n        lr_alpha=args.lr_alpha,\n        sync_update=args.sync_update,\n        train_mb_size=args.train_mb_size,\n        train_epochs=args.train_epochs,\n        eval_mb_size=100,\n        device=device,\n        plugins=[replay_plugin],\n        evaluator=evaluation_plugin,\n    )\n\n    res = None\n    for experience in benchmark.train_stream:\n        cl_strategy.train(experience)\n        res = cl_strategy.eval(benchmark.test_stream)\n    return res\n\n\nif __name__ == '__main__':\n    res = lamaml_scifar100()\n    print(res)\n"
  },
  {
    "path": "experiments/split_cifar100/online_replay.py",
    "content": "import numpy as np\nimport torch\nimport torchvision.transforms as transforms\nfrom torch.optim import SGD\n\nfrom avalanche.benchmarks.classic import SplitCIFAR100\nfrom avalanche.benchmarks import benchmark_with_validation_stream\nfrom avalanche.benchmarks.scenarios import split_online_stream\nfrom avalanche.evaluation.metrics import accuracy_metrics, loss_metrics\nfrom avalanche.logging import InteractiveLogger\nfrom avalanche.models import SlimResNet18\nfrom avalanche.training.plugins import EvaluationPlugin, ReplayPlugin\nfrom avalanche.training.storage_policy import ClassBalancedBuffer\nfrom avalanche.training.supervised import Naive\nfrom experiments.utils import create_default_args, set_seed\n\n\ndef online_replay_scifar100(override_args=None):\n    \"\"\" \n    Online replay for CIFAR100, the hyperparameters are taken from\n    \"New Insights on Reducing Abrupt Representation Change in Online Continual Learning\", \n    Lucas Caccia et. al., https://openreview.net/forum?id=N8MaByOzUfb \n\n    Augmentations are not used since they make the results worse in this particular setting\n    \"\"\"\n\n    args = create_default_args(\n        {\n            \"cuda\": 0,\n            \"mem_size\": 10000,\n            \"lr\": 0.1,\n            \"train_mb_size\": 10,\n            \"seed\": None,\n            \"batch_size_mem\": 10,\n        },\n        override_args\n    )\n    set_seed(args.seed)\n    fixed_class_order = np.arange(100)\n    device = torch.device(\n        f\"cuda:{args.cuda}\" if torch.cuda.is_available() and args.cuda >= 0 else \"cpu\"\n    )\n\n    unique_transform = transforms.Compose(\n        [\n            transforms.ToTensor(),\n            transforms.Normalize(\n                (0.5071, 0.4866, 0.4409), (0.2009, 0.1984, 0.2023)\n            ),\n        ]\n    )\n\n    scenario = SplitCIFAR100(\n        20,\n        return_task_id=False,\n        seed=args.seed,\n        fixed_class_order=fixed_class_order,\n        
shuffle=True,\n        class_ids_from_zero_in_each_exp=False,\n        train_transform=unique_transform,\n        eval_transform=unique_transform,\n    )\n\n    scenario = benchmark_with_validation_stream(scenario, 0.05)\n    model = SlimResNet18(100)\n    optimizer = SGD(model.parameters(), lr=args.lr)\n\n    interactive_logger = InteractiveLogger()\n\n    loggers = [interactive_logger]\n\n    training_metrics = []\n\n    evaluation_metrics = [\n        accuracy_metrics(epoch=True, stream=True),\n        loss_metrics(epoch=True, stream=True),\n    ]\n\n    evaluator = EvaluationPlugin(\n        *training_metrics,\n        *evaluation_metrics,\n        loggers=loggers,\n    )\n    \n    storage_policy = ClassBalancedBuffer(args.mem_size, adaptive_size=True)\n    plugins = [ReplayPlugin(args.mem_size, storage_policy=storage_policy)]\n\n\n    cl_strategy = Naive(\n        model=model,\n        optimizer=optimizer,\n        plugins=plugins,\n        evaluator=evaluator,\n        device=device,\n        train_mb_size=args.train_mb_size,\n        eval_mb_size=64,\n    )\n\n    ocl_scenario = split_online_stream(\n        original_stream=scenario,\n        experience_size = args.train_mb_size,\n        access_task_boundaries = False\n    )\n\n    for t, experience in enumerate(ocl_scenario):\n        cl_strategy.train(\n            experience,\n            eval_streams=[],\n            num_workers=0,\n            drop_last=True,\n        )\n\n    results = cl_strategy.eval(scenario.test_stream)\n    return results\n\n\nif __name__ == \"__main__\":\n    res = online_replay_scifar100()\n    print(res)\n"
  },
  {
    "path": "experiments/split_cifar100/replay.py",
    "content": "#!/usr/bin/env python3\nimport numpy as np\nimport torch\nimport torchvision.transforms as transforms\nfrom torch.optim import SGD\n\nfrom avalanche.benchmarks.classic import SplitCIFAR100\nfrom avalanche.benchmarks.generators import benchmark_with_validation_stream\nfrom avalanche.evaluation.metrics import accuracy_metrics, loss_metrics\nfrom avalanche.logging import InteractiveLogger\nfrom avalanche.models.resnet32 import resnet32\nfrom avalanche.models.dynamic_modules import IncrementalClassifier\nfrom avalanche.training.plugins import EvaluationPlugin, ReplayPlugin, LRSchedulerPlugin\nfrom avalanche.training.storage_policy import ClassBalancedBuffer\nfrom avalanche.training.supervised import Naive\nfrom experiments.utils import create_default_args, set_seed\nfrom torch.optim.lr_scheduler import StepLR\n\n\ndef replay_scifar100(override_args=None):\n    \"\"\" \n    Replay for Split CIFAR100\n    \"\"\"\n\n    args = create_default_args(\n        {\n            \"cuda\": 0,\n            \"num_epochs\": 200,\n            \"mem_size\": 2000,\n            \"momentum\": 0.9,\n            \"weight_decay\": 0.0002,\n            \"lr\": 0.1,\n            \"train_mb_size\": 128,\n            \"seed\": None,\n            \"batch_size_mem\": 128,\n        },\n        override_args\n    )\n    set_seed(args.seed)\n    fixed_class_order = np.arange(100)\n    device = torch.device(\n        f\"cuda:{args.cuda}\" if torch.cuda.is_available() and args.cuda >= 0 else \"cpu\"\n    )\n\n    scenario = SplitCIFAR100(\n        20,\n        return_task_id=False,\n        seed=args.seed,\n        fixed_class_order=fixed_class_order,\n        shuffle=True,\n        class_ids_from_zero_in_each_exp=False,\n    )\n\n    scenario = benchmark_with_validation_stream(scenario, 0.05)\n    input_size = (3, 32, 32)\n    model = resnet32(num_classes=1)\n    model.fc = IncrementalClassifier(model.fc.in_features, 1)\n\n    optimizer = SGD(model.parameters(), momentum=args.momentum, 
weight_decay=args.weight_decay, lr=args.lr)\n\n    scheduler = StepLR(optimizer, step_size=args.num_epochs//3, gamma=0.3)\n\n    scheduler_plugin = LRSchedulerPlugin(scheduler, step_granularity=\"epoch\", first_exp_only=False)\n\n    interactive_logger = InteractiveLogger()\n\n    loggers = [interactive_logger]\n\n    training_metrics = []\n\n    evaluation_metrics = [\n        accuracy_metrics(epoch=True, stream=True),\n        loss_metrics(epoch=True, stream=True),\n    ]\n\n    evaluator = EvaluationPlugin(\n        *training_metrics,\n        *evaluation_metrics,\n        loggers=loggers,\n    )\n    \n    storage_policy = ClassBalancedBuffer(args.mem_size, adaptive_size=True)\n    plugins = [scheduler_plugin, ReplayPlugin(args.mem_size, storage_policy=storage_policy)]\n\n    cl_strategy = Naive(\n        model=model,\n        optimizer=optimizer,\n        plugins=plugins,\n        evaluator=evaluator,\n        device=device,\n        train_mb_size=args.train_mb_size,\n        train_epochs=args.num_epochs,\n        eval_mb_size=64,\n    )\n\n    for t, experience in enumerate(scenario.train_stream):\n        print(\"Start of experience: \", experience.current_experience)\n        print(\"Current Classes: \", experience.classes_in_this_experience)\n\n        cl_strategy.train(\n            experience,\n            eval_streams=[],\n            num_workers=2,\n            drop_last=True,\n        )\n\n        cl_strategy.eval(scenario.test_stream[: t + 1])\n\n    # Only evaluate at the end on the test stream\n    results = cl_strategy.eval(scenario.test_stream)\n\n    return results\n\n\nif __name__ == \"__main__\":\n    res = replay_scifar100()\n    print(res)\n"
  },
  {
    "path": "experiments/split_mnist/__init__.py",
    "content": "from .synaptic_intelligence import synaptic_intelligence_smnist\nfrom .lwf import lwf_smnist\nfrom .gss import gss_smnist\nfrom .gdumb import gdumb_smnist\nfrom .cope import cope_smnist\nfrom .generative_replay import generative_replay_smnist\nfrom .rwalk import rwalk_smnist\nfrom .naive import naive_smnist\nfrom .online_replay import online_replay_smnist\nfrom .mir import mir_smnist\n"
  },
  {
    "path": "experiments/split_mnist/cope.py",
    "content": "import torch\nimport avalanche as avl\nfrom experiments.utils import set_seed, create_default_args\nfrom models import MLP\n\n\ndef cope_smnist(override_args=None):\n    \"\"\"\n    \"Continual prototype evolution: Learning online from non-stationary data streams\"\n    by De Lange et. al. (2021).\n    https://arxiv.org/abs/2009.00919\n\n    Expected performance is 93%, which is higher than what we achieve.\n    \"\"\"\n    args = create_default_args({'cuda': 0, 'nb_tasks': 5, 'batch_size': 10, 'epochs': 1,\n                                'mem_size': 2000, 'alpha': 0.99, 'T': 0.1, 'featsize': 32,\n                                'seed': None}, override_args)\n    set_seed(args.seed)\n    device = torch.device(f\"cuda:{args.cuda}\"\n                          if torch.cuda.is_available() and\n                          args.cuda >= 0 else \"cpu\")\n\n    n_classes = 10\n    task_scenario = avl.benchmarks.SplitMNIST(args.nb_tasks, return_task_id=False,\n                                              fixed_class_order=[i for i in range(n_classes)])\n\n    # Make data incremental (one batch = one experience)\n    benchmark = avl.benchmarks.data_incremental_benchmark(task_scenario,\n                                                          experience_size=args.batch_size)\n    print(f\"{benchmark.n_experiences} batches in online data incremental setup.\")\n    # 6002 batches for SplitMNIST with batch size 10\n    # ---------\n\n    model = MLP(output_size=args.featsize,\n                hidden_size=400, hidden_layers=2, drop_rate=0)\n\n    logger = avl.logging.InteractiveLogger()\n\n    eval_plugin = avl.training.plugins.EvaluationPlugin(\n        avl.evaluation.metrics.accuracy_metrics(experience=True, stream=True),\n        avl.evaluation.metrics.loss_metrics(experience=False, stream=True),\n        avl.evaluation.metrics.StreamForgetting(),\n        loggers=[logger])\n\n    cope = avl.training.plugins.CoPEPlugin(mem_size=args.mem_size, 
alpha=args.alpha,\n                                           p_size=args.featsize, n_classes=n_classes,\n                                           T=args.T)\n\n    cl_strategy = avl.training.Naive(\n        model, torch.optim.SGD(model.parameters(), lr=0.01),\n        cope.ppp_loss,  # CoPE PPP-Loss\n        train_mb_size=args.batch_size, train_epochs=args.epochs,\n        eval_mb_size=100, device=device,\n        plugins=[cope],\n        evaluator=eval_plugin\n        )\n\n    cl_strategy.train(benchmark.train_stream)\n    res = cl_strategy.eval(benchmark.test_stream)\n    return res\n\n\nif __name__ == '__main__':\n    res = cope_smnist()\n    print(res)\n"
  },
  {
    "path": "experiments/split_mnist/gdumb.py",
    "content": "import avalanche as avl\nimport torch\nfrom torch.nn import CrossEntropyLoss\nfrom torch.optim import SGD\nfrom avalanche.evaluation import metrics as metrics\nfrom models import MLP\nfrom experiments.utils import set_seed, create_default_args\n\n\ndef gdumb_smnist(override_args=None):\n    \"\"\"\n    \"GDumb: A Simple Approach that Questions Our Progress in Continual Learning\" by Prabhu et. al. (2020).\n    https://link.springer.com/chapter/10.1007/978-3-030-58536-5_31\n    \"\"\"\n    args = create_default_args({'cuda': 0, 'hidden_size': 400, 'mem_size': 4400,\n                                'hidden_layers': 2, 'epochs': 10, 'dropout': 0,\n                                'learning_rate': 0.1, 'train_mb_size': 16, 'seed': None}, override_args)\n    set_seed(args.seed)\n    device = torch.device(f\"cuda:{args.cuda}\"\n                          if torch.cuda.is_available() and\n                          args.cuda >= 0 else \"cpu\")\n\n    benchmark = avl.benchmarks.SplitMNIST(5, return_task_id=False)\n    model = MLP(hidden_size=args.hidden_size, hidden_layers=args.hidden_layers,\n                drop_rate=args.dropout, relu_act=True)\n    criterion = CrossEntropyLoss()\n\n    interactive_logger = avl.logging.InteractiveLogger()\n\n    evaluation_plugin = avl.training.plugins.EvaluationPlugin(\n        metrics.accuracy_metrics(epoch=True, experience=True, stream=True),\n        loggers=[interactive_logger])\n\n    cl_strategy = avl.training.GDumb(\n        model, SGD(model.parameters(), lr=args.learning_rate), criterion,\n        mem_size=args.mem_size,\n        train_mb_size=args.train_mb_size, train_epochs=args.epochs, eval_mb_size=128,\n        device=device, evaluator=evaluation_plugin)\n\n    res = None\n    for experience in benchmark.train_stream:\n        cl_strategy.train(experience)\n        res = cl_strategy.eval(benchmark.test_stream)\n\n    return res\n\n\nif __name__ == '__main__':\n    res = gdumb_smnist()\n    print(res)\n"
  },
  {
    "path": "experiments/split_mnist/generative_replay.py",
    "content": "import avalanche as avl\nimport torch\nfrom torch.nn import CrossEntropyLoss\nfrom torch.optim import SGD\nfrom avalanche.evaluation import metrics as metrics\nfrom models import MLP\nfrom experiments.utils import set_seed, create_default_args\n\n\ndef generative_replay_smnist(override_args=None):\n    \"\"\"\n    \"Continual Learning with Deep Generative Replay\" by Shin et. al. (2017).\n    https://arxiv.org/abs/1705.08690\n    \"\"\"\n    args = create_default_args({'cuda': 0, 'hidden_size': 400,\n                                'hidden_layers': 2, 'epochs': 10, 'dropout': 0,\n                                'learning_rate': 0.001, 'train_mb_size': 16, 'seed': None}, override_args)\n    set_seed(args.seed)\n    device = torch.device(f\"cuda:{args.cuda}\"\n                          if torch.cuda.is_available() and\n                          args.cuda >= 0 else \"cpu\")\n\n    benchmark = avl.benchmarks.SplitMNIST(5, return_task_id=False)\n    model = MLP(hidden_size=args.hidden_size, hidden_layers=args.hidden_layers,\n                drop_rate=args.dropout, relu_act=True)\n    criterion = CrossEntropyLoss()\n\n    interactive_logger = avl.logging.InteractiveLogger()\n\n    evaluation_plugin = avl.training.plugins.EvaluationPlugin(\n        metrics.accuracy_metrics(epoch=True, experience=True, stream=True),\n        loggers=[interactive_logger])\n\n    cl_strategy = avl.training.GenerativeReplay(\n        model,\n        torch.optim.Adam(model.parameters(), lr=args.learning_rate),\n        criterion,\n        train_mb_size=args.train_mb_size,\n        train_epochs=args.epochs,\n        eval_mb_size=128,\n        replay_size=100,\n        device=device,\n        evaluator=evaluation_plugin,\n    )\n\n    res = None\n    for experience in benchmark.train_stream:\n        cl_strategy.train(experience)\n        res = cl_strategy.eval(benchmark.test_stream)\n\n    return res\n\n\nif __name__ == '__main__':\n    res = generative_replay_smnist()\n    
print(res)\n"
  },
  {
    "path": "experiments/split_mnist/gss.py",
    "content": "import torch.nn as nn\n\nfrom avalanche.benchmarks import CLExperience\nfrom avalanche.benchmarks.classic import SplitMNIST\nfrom avalanche.benchmarks import data_incremental_benchmark\nfrom avalanche.evaluation.metrics import \\\n    accuracy_metrics, \\\n    loss_metrics\nfrom avalanche.logging import InteractiveLogger\nfrom avalanche.training.plugins import EvaluationPlugin\nfrom avalanche.training import GSS_greedy\nimport torch\nfrom torch.nn import CrossEntropyLoss\nfrom torch.optim import SGD\nfrom experiments.utils import set_seed, create_default_args\nfrom models import MLP_gss\n\n\ndef gss_smnist(override_args=None):\n    \"\"\"\n    https://arxiv.org/abs/1903.08671\n\n    Expected accuracy is 82% which is slightly higher than the one we achieve.\n    \"\"\"\n    args = create_default_args({\n        'cuda': 0, 'lr': 0.05,\n        'train_mb_size': 10, 'mem_strength': 10,\n        'input_size': [1, 28, 28], 'train_epochs': 3, 'eval_mb_size': 10,\n        'mem_size': 300, 'seed': None}, override_args)\n    set_seed(args.seed)\n    device = torch.device(f\"cuda:{args.cuda}\"\n                          if torch.cuda.is_available() and\n                          args.cuda >= 0 else \"cpu\")\n    model, benchmark = setup_mnist()\n    eval_plugin = EvaluationPlugin(\n        accuracy_metrics(epoch=True, experience=True, stream=True),\n        loss_metrics(stream=True), loggers=[InteractiveLogger()])\n\n    optimizer = SGD(model.parameters(), lr=args.lr)\n    strategy = GSS_greedy(model, optimizer, criterion=CrossEntropyLoss(),\n                          mem_strength=args.mem_strength,\n                          input_size=args.input_size,\n                          train_epochs=args.train_epochs,\n                          train_mb_size=args.train_mb_size,\n                          eval_mb_size=args.eval_mb_size,\n                          mem_size=args.mem_size,\n                          device=device,\n                          
evaluator=eval_plugin)\n\n    res = None\n    for experience in benchmark.train_stream:\n        print(\">Experience \", experience.current_experience)\n        strategy.train(experience)\n        res = strategy.eval(benchmark.test_stream)\n\n    return res\n\n\ndef shrinking_experience_size_split_strategy(\n        experience: CLExperience):\n\n    experience_size = 1000\n\n    exp_dataset = experience.dataset\n    exp_indices = list(range(len(exp_dataset)))\n\n    result_datasets = []\n\n    exp_indices = \\\n        torch.as_tensor(exp_indices)[\n            torch.randperm(len(exp_indices))\n        ].tolist()\n\n    result_datasets.append(exp_dataset.subset(exp_indices[0:experience_size]))\n\n    return result_datasets\n\n\ndef setup_mnist():\n\n    scenario = data_incremental_benchmark(SplitMNIST(\n        n_experiences=5, seed=1), experience_size=0,\n        custom_split_strategy=shrinking_experience_size_split_strategy)\n    n_inputs = 784\n    nh = 100\n    nl = 2\n    n_outputs = 10\n    model = MLP_gss([n_inputs] + [nh] * nl + [n_outputs])\n\n    return model, scenario\n\n\nif __name__ == '__main__':\n    res = gss_smnist()\n    print(res)\n"
  },
  {
    "path": "experiments/split_mnist/lwf.py",
    "content": "import avalanche as avl\nimport torch\nfrom torch.nn import CrossEntropyLoss\nfrom torch.optim import SGD\nfrom avalanche.evaluation import metrics as metrics\nfrom models import MLP\nfrom experiments.utils import set_seed, create_default_args\n\n\nclass LwFCEPenalty(avl.training.LwF):\n    \"\"\"This wrapper around LwF computes the total loss\n    by diminishing the cross-entropy contribution over time,\n    as per the paper\n    \"Three scenarios for continual learning\" by van de Ven et. al. (2018).\n    https://arxiv.org/pdf/1904.07734.pdf\n    The loss is L_tot = (1/n_exp_so_far) * L_cross_entropy +\n                        alpha[current_exp] * L_distillation\n    \"\"\"\n    def _before_backward(self, **kwargs):\n        self.loss *= float(1/(self.clock.train_exp_counter+1))\n        super()._before_backward(**kwargs)\n\n\ndef lwf_smnist(override_args=None):\n    \"\"\"\n    \"Learning without Forgetting\" by Li et. al. (2016).\n    http://arxiv.org/abs/1606.09282\n    Since experimental setup of the paper is quite outdated and not\n    easily reproducible, this experiment is based on\n    \"Three scenarios for continual learning\" by van de Ven et. al. 
(2018).\n    https://arxiv.org/pdf/1904.07734.pdf\n\n    The hyper-parameter alpha controlling the regularization is increased over time, resulting\n    in a regularization of  (1- 1/n_exp_so_far) * L_distillation\n    \"\"\"\n    args = create_default_args({'cuda': 0,\n                                'lwf_alpha': [0, 0.5, 1.33333, 2.25, 3.2],\n                                'lwf_temperature': 2, 'epochs': 21,\n                                'layers': 1, 'hidden_size': 200,\n                                'learning_rate': 0.001, 'train_mb_size': 128,\n                                'seed': None}, override_args)\n    set_seed(args.seed)\n    device = torch.device(f\"cuda:{args.cuda}\"\n                          if torch.cuda.is_available() and\n                          args.cuda >= 0 else \"cpu\")\n\n    benchmark = avl.benchmarks.SplitMNIST(5, return_task_id=False)\n    model = MLP(hidden_size=args.hidden_size, hidden_layers=args.layers,\n                initial_out_features=0, relu_act=False)\n    criterion = CrossEntropyLoss()\n\n    interactive_logger = avl.logging.InteractiveLogger()\n\n    evaluation_plugin = avl.training.plugins.EvaluationPlugin(\n        metrics.accuracy_metrics(epoch=True, experience=True, stream=True),\n        loggers=[interactive_logger])\n\n    cl_strategy = LwFCEPenalty(\n        model, SGD(model.parameters(), lr=args.learning_rate), criterion,\n        alpha=args.lwf_alpha, temperature=args.lwf_temperature,\n        train_mb_size=args.train_mb_size, train_epochs=args.epochs,\n        device=device, evaluator=evaluation_plugin)\n\n    res = None\n    for experience in benchmark.train_stream:\n        cl_strategy.train(experience)\n        res = cl_strategy.eval(benchmark.test_stream)\n\n    return res\n\n\nif __name__ == '__main__':\n    res = lwf_smnist()\n    print(res)\n"
  },
  {
    "path": "experiments/split_mnist/mir.py",
    "content": "import numpy as np\nimport torch\nfrom torch.optim import SGD\n\nfrom avalanche.benchmarks.classic import SplitMNIST\nfrom avalanche.benchmarks import benchmark_with_validation_stream\nfrom avalanche.benchmarks.scenarios import split_online_stream\nfrom avalanche.evaluation.metrics import accuracy_metrics, loss_metrics\nfrom avalanche.logging import InteractiveLogger\nfrom avalanche.models import SimpleMLP\nfrom avalanche.training.plugins import EvaluationPlugin, MIRPlugin\nfrom avalanche.training.supervised import Naive\nfrom experiments.utils import create_default_args, set_seed, restrict_dataset_size\n\n\ndef mir_smnist(override_args=None):\n    args = create_default_args(\n        {\n            \"cuda\": 0,\n            \"mem_size\": 500,\n            \"lr\": 0.05,\n            \"train_mb_size\": 10,\n            \"seed\": None,\n            \"subsample\": 50,\n            \"batch_size_mem\": 10,\n            \"dataset_size\": 1000,\n        },\n        override_args\n    )\n    set_seed(args.seed)\n    device = torch.device(\n        f\"cuda:{args.cuda}\" if torch.cuda.is_available() and args.cuda >= 0 else \"cpu\"\n    )\n    scenario = SplitMNIST(\n        5,\n        return_task_id=False,\n        seed=0,\n        train_transform=None,\n        eval_transform=None,\n    )\n    scenario = benchmark_with_validation_stream(scenario, 0.05)\n    scenario = restrict_dataset_size(scenario, args.dataset_size)\n    model = SimpleMLP(10, hidden_size=400, hidden_layers=1)\n    optimizer = SGD(model.parameters(), lr=args.lr)\n    interactive_logger = InteractiveLogger()\n    loggers = [interactive_logger]\n    training_metrics = []\n    evaluation_metrics = [\n        accuracy_metrics(epoch=True, stream=True),\n        loss_metrics(epoch=True, stream=True),\n    ]\n    evaluator = EvaluationPlugin(\n        *training_metrics,\n        *evaluation_metrics,\n        loggers=loggers,\n    )\n    plugins = [\n        MIRPlugin(\n            
mem_size=args.mem_size, subsample=args.subsample, batch_size_mem=args.batch_size_mem\n        )\n    ]\n    cl_strategy = Naive(\n        model=model,\n        optimizer=optimizer,\n        plugins=plugins,\n        evaluator=evaluator,\n        device=device,\n        train_mb_size=args.train_mb_size,\n        eval_mb_size=64,\n    )\n    ocl_scenario = split_online_stream(\n            original_stream=scenario.train_stream,\n            experience_size=10,\n            access_task_boundaries=False,\n        )\n    for t, experience in enumerate(ocl_scenario):\n        cl_strategy.train(\n            experience,\n            eval_streams=[],\n            num_workers=0,\n            drop_last=True,\n        )\n    results = cl_strategy.eval(scenario.test_stream)\n    return results\n\n\nif __name__ == \"__main__\":\n    res = mir_smnist()\n    print(res)\n"
  },
  {
    "path": "experiments/split_mnist/naive.py",
    "content": "import avalanche as avl\nimport torch\nfrom torch.nn import CrossEntropyLoss\nfrom torch.optim import Adam\nfrom avalanche.evaluation import metrics as metrics\nfrom models import MultiHeadMLP, MLP\nfrom experiments.utils import set_seed, create_default_args\n\n\ndef naive_smnist(override_args=None):\n    \"\"\"\n    \"Continual Learning Through Synaptic Intelligence\" by Zenke et. al. (2017).\n    http://proceedings.mlr.press/v70/zenke17a.html\n    \"\"\"\n    args = create_default_args({'cuda': 0, 'epochs': 10,\n                                'learning_rate': 0.001, 'train_mb_size': 64,\n                                'seed': None,\n                                'task-incremental': False}, override_args)\n    set_seed(args.seed)\n    device = torch.device(f\"cuda:{args.cuda}\"\n                          if torch.cuda.is_available() and\n                          args.cuda >= 0 else \"cpu\")\n\n    benchmark = avl.benchmarks.SplitMNIST(5, return_task_id=args.task_incremental,\n                                          fixed_class_order=list(range(10)))\n\n    model = MultiHeadMLP(hidden_size=256, hidden_layers=2) if args.task_incremental \\\n        else MLP(hidden_size=256, hidden_layers=2)\n\n    criterion = CrossEntropyLoss()\n\n    interactive_logger = avl.logging.InteractiveLogger()\n\n    evaluation_plugin = avl.training.plugins.EvaluationPlugin(\n        metrics.accuracy_metrics(epoch=True, experience=True, stream=True),\n        loggers=[interactive_logger])\n\n    cl_strategy = avl.training.Naive(\n        model, Adam(model.parameters(), lr=args.learning_rate), criterion,\n        train_mb_size=args.train_mb_size, train_epochs=args.epochs, eval_mb_size=128,\n        device=device, evaluator=evaluation_plugin)\n\n    for experience in benchmark.train_stream:\n        cl_strategy.train(experience)\n        res = cl_strategy.eval(benchmark.test_stream)\n\n    return res\n\n\nif __name__ == '__main__':\n    res = naive_smnist()\n    
print(res)\n"
  },
  {
    "path": "experiments/split_mnist/online_replay.py",
    "content": "import numpy as np\nimport torch\nfrom torch.optim import SGD\n\nfrom avalanche.benchmarks.classic import SplitMNIST\nfrom avalanche.benchmarks import benchmark_with_validation_stream\nfrom avalanche.benchmarks.scenarios import split_online_stream\nfrom avalanche.evaluation.metrics import accuracy_metrics, loss_metrics\nfrom avalanche.logging import InteractiveLogger\nfrom avalanche.models import SimpleMLP\nfrom avalanche.training.plugins import EvaluationPlugin, ReplayPlugin\nfrom avalanche.training.storage_policy import ClassBalancedBuffer\nfrom avalanche.training.supervised import Naive\nfrom experiments.utils import create_default_args, set_seed\n\n\ndef online_replay_smnist(override_args=None):\n    args = create_default_args(\n        {\n            \"cuda\": 0,\n            \"mem_size\": 1000,\n            \"lr\": 0.1,\n            \"train_mb_size\": 10,\n            \"seed\": None,\n            \"batch_size_mem\": 10,\n        },\n        override_args\n    )\n    set_seed(args.seed)\n    fixed_class_order = np.arange(10)\n    device = torch.device(\n        f\"cuda:{args.cuda}\" if torch.cuda.is_available() and args.cuda >= 0 else \"cpu\"\n    )\n\n    scenario = SplitMNIST(\n        5,\n        return_task_id=False,\n        seed=args.seed,\n        fixed_class_order=fixed_class_order,\n        shuffle=True,\n        class_ids_from_zero_in_each_exp=False,\n    )\n\n    scenario = benchmark_with_validation_stream(scenario, 0.05)\n    model = SimpleMLP(10)\n    optimizer = SGD(model.parameters(), lr=args.lr)\n\n    interactive_logger = InteractiveLogger()\n\n    loggers = [interactive_logger]\n\n    training_metrics = []\n\n    evaluation_metrics = [\n        accuracy_metrics(epoch=True, stream=True),\n        loss_metrics(epoch=True, stream=True),\n    ]\n\n    evaluator = EvaluationPlugin(\n        *training_metrics,\n        *evaluation_metrics,\n        loggers=loggers,\n    )\n    \n    storage_policy = 
ClassBalancedBuffer(args.mem_size, adaptive_size=True)\n    plugins = [ReplayPlugin(args.mem_size, storage_policy=storage_policy)]\n\n    cl_strategy = Naive(\n        model=model,\n        optimizer=optimizer,\n        plugins=plugins,\n        evaluator=evaluator,\n        device=device,\n        train_mb_size=args.train_mb_size,\n        eval_mb_size=64,\n    )\n\n    ocl_scenario = split_online_stream(\n        original_stream=scenario.train_stream,\n        experience_size=args.train_mb_size,\n        access_task_boundaries=False,\n    )\n    for t, experience in enumerate(ocl_scenario):\n        cl_strategy.train(\n            experience,\n            eval_streams=[],\n            num_workers=0,\n            drop_last=True,\n        )\n\n    results = cl_strategy.eval(scenario.test_stream)\n\n    return results\n\n\nif __name__ == \"__main__\":\n    res = online_replay_smnist()\n    print(res)\n"
  },
  {
    "path": "experiments/split_mnist/rwalk.py",
    "content": "import avalanche as avl\nimport torch\nfrom torch.nn import CrossEntropyLoss\nfrom torch.optim import Adam\nfrom avalanche.evaluation import metrics as metrics\nfrom models import MultiHeadMLP\nfrom experiments.utils import set_seed, create_default_args\n\n\ndef rwalk_smnist(override_args=None):\n    \"\"\"\n    Reproducing RWalk experiments from paper\n    \"Riemannian Walk for Incremental Learning:\n    Understanding Forgetting and Intransigence\" by Chaudhry et. al. (2018).\n    https://openaccess.thecvf.com/content_ECCV_2018/html/Arslan_Chaudhry__Riemannian_Walk_ECCV_2018_paper.html\n\n    The expected value is 99%, which is higher than the achieved one.\n    \"\"\"\n    args = create_default_args({'cuda': 0, 'ewc_lambda': 0.1, 'ewc_alpha': 0.9, 'delta_t': 10,\n                                'epochs': 10, 'learning_rate': 0.001,\n                                'train_mb_size': 64, 'seed': None},\n                               override_args)\n    set_seed(args.seed)\n    device = torch.device(f\"cuda:{args.cuda}\"\n                          if torch.cuda.is_available() and\n                          args.cuda >= 0 else \"cpu\")\n    benchmark = avl.benchmarks.SplitMNIST(5, return_task_id=True,\n                                          fixed_class_order=list(range(10)))\n    model = MultiHeadMLP(hidden_size=256, hidden_layers=2)\n    criterion = CrossEntropyLoss()\n\n    interactive_logger = avl.logging.InteractiveLogger()\n\n    evaluation_plugin = avl.training.plugins.EvaluationPlugin(\n        metrics.accuracy_metrics(epoch=True, experience=True, stream=True),\n        loggers=[interactive_logger])\n\n    cl_strategy = avl.training.Naive(\n        model, Adam(model.parameters(), lr=args.learning_rate), criterion,\n        plugins=[avl.training.plugins.RWalkPlugin(\n            ewc_lambda=args.ewc_lambda,\n            ewc_alpha=args.ewc_alpha,\n            delta_t=args.delta_t)],\n        train_mb_size=args.train_mb_size, 
train_epochs=args.epochs, eval_mb_size=128,\n        device=device, evaluator=evaluation_plugin)\n\n    for experience in benchmark.train_stream:\n        cl_strategy.train(experience)\n        res = cl_strategy.eval(benchmark.test_stream)\n\n    return res\n\n\nif __name__ == '__main__':\n    res = rwalk_smnist()\n    print(res)\n"
  },
  {
    "path": "experiments/split_mnist/synaptic_intelligence.py",
    "content": "import avalanche as avl\nimport torch\nfrom torch.nn import CrossEntropyLoss\nfrom torch.optim import Adam\nfrom avalanche.evaluation import metrics as metrics\nfrom models import MultiHeadMLP\nfrom experiments.utils import set_seed, create_default_args\n\n\ndef synaptic_intelligence_smnist(override_args=None):\n    \"\"\"\n    \"Continual Learning Through Synaptic Intelligence\" by Zenke et. al. (2017).\n    http://proceedings.mlr.press/v70/zenke17a.html\n    \"\"\"\n    args = create_default_args({'cuda': 0, 'si_lambda': 1, 'si_eps': 0.1, 'epochs': 10,\n                                'learning_rate': 0.001, 'train_mb_size': 64, 'seed': None},\n                               override_args)\n    set_seed(args.seed)\n    device = torch.device(f\"cuda:{args.cuda}\"\n                          if torch.cuda.is_available() and\n                          args.cuda >= 0 else \"cpu\")\n    benchmark = avl.benchmarks.SplitMNIST(5, return_task_id=True,\n                                          fixed_class_order=list(range(10)))\n    model = MultiHeadMLP(hidden_size=256, hidden_layers=2)\n    criterion = CrossEntropyLoss()\n\n    interactive_logger = avl.logging.InteractiveLogger()\n\n    evaluation_plugin = avl.training.plugins.EvaluationPlugin(\n        metrics.accuracy_metrics(epoch=True, experience=True, stream=True),\n        loggers=[interactive_logger])\n\n    cl_strategy = avl.training.SynapticIntelligence(\n        model, Adam(model.parameters(), lr=args.learning_rate), criterion,\n        si_lambda=args.si_lambda, eps=args.si_eps,\n        train_mb_size=args.train_mb_size, train_epochs=args.epochs, eval_mb_size=128,\n        device=device, evaluator=evaluation_plugin)\n\n    for experience in benchmark.train_stream:\n        cl_strategy.train(experience)\n        res = cl_strategy.eval(benchmark.test_stream)\n\n    return res\n\n\nif __name__ == '__main__':\n    res = synaptic_intelligence_smnist()\n    print(res)\n"
  },
  {
    "path": "experiments/split_tiny_imagenet/__init__.py",
    "content": "from .mas import mas_stinyimagenet\nfrom .lwf import lwf_stinyimagenet\nfrom .lamaml import lamaml_stinyimagenet\nfrom .naive import naive_stinyimagenet\nfrom .packnet import packnet_stinyimagenet"
  },
  {
    "path": "experiments/split_tiny_imagenet/lamaml.py",
    "content": "import avalanche as avl\nimport torch\nfrom torch.nn import CrossEntropyLoss\n\nfrom avalanche.evaluation import metrics as metrics\nfrom avalanche.training.storage_policy import ReservoirSamplingBuffer\nfrom avalanche.training.plugins import ReplayPlugin\nfrom avalanche.training.supervised.lamaml_v2 import LaMAML\n\nfrom models.models_lamaml import MTConvTinyImageNet\nfrom experiments.utils import set_seed, create_default_args\n\n\ndef lamaml_stinyimagenet(override_args=None):\n    \"\"\"\n    \"La-MAML: Look-ahead Meta Learning for Continual Learning\",\n    Gunshi Gupta, Karmesh Yadav, Liam Paull;\n    NeurIPS, 2020\n    https://arxiv.org/abs/2007.13904\n\n    Expected performance is 66%, which is higher than what we achieve.\n    \"\"\"\n    # Args\n    args = create_default_args(\n        {'cuda': 0, 'n_inner_updates': 5, 'second_order': True,\n         'grad_clip_norm': 1.0, 'learn_lr': True, 'lr_alpha': 0.4,\n         'sync_update': False, 'mem_size': 400, 'lr': 0.1, 'train_mb_size': 10,\n         'train_epochs': 10, 'seed': None}, override_args\n    )\n\n    set_seed(args.seed)\n    device = torch.device(f\"cuda:{args.cuda}\"\n                          if torch.cuda.is_available() and\n                          args.cuda >= 0 else \"cpu\")\n    # Benchmark\n    benchmark = avl.benchmarks.SplitTinyImageNet(n_experiences=20,\n                                                 return_task_id=True)\n\n    # Loggers and metrics\n    interactive_logger = avl.logging.InteractiveLogger()\n\n    evaluation_plugin = avl.training.plugins.EvaluationPlugin(\n        metrics.accuracy_metrics(epoch=True, experience=True, stream=True),\n        loggers=[interactive_logger])\n\n    # Buffer\n    rs_buffer = ReservoirSamplingBuffer(max_size=args.mem_size)\n    replay_plugin = ReplayPlugin(\n        mem_size=args.mem_size,\n        batch_size=args.train_mb_size,\n        batch_size_mem=args.train_mb_size,\n        task_balanced_dataloader=False,\n        
storage_policy=rs_buffer\n    )\n\n    # Strategy\n    model = MTConvTinyImageNet()\n    cl_strategy = LaMAML(\n        model,\n        torch.optim.SGD(model.parameters(), lr=args.lr),\n        CrossEntropyLoss(),\n        n_inner_updates=args.n_inner_updates,\n        second_order=args.second_order,\n        grad_clip_norm=args.grad_clip_norm,\n        learn_lr=args.learn_lr,\n        lr_alpha=args.lr_alpha,\n        sync_update=args.sync_update,\n        train_mb_size=args.train_mb_size,\n        train_epochs=args.train_epochs,\n        eval_mb_size=100,\n        device=device,\n        plugins=[replay_plugin],\n        evaluator=evaluation_plugin,\n    )\n\n    res = None\n    for experience in benchmark.train_stream:\n        cl_strategy.train(experience)\n        res = cl_strategy.eval(benchmark.test_stream)\n    return res\n\n\nif __name__ == '__main__':\n    res = lamaml_stinyimagenet()\n    print(res)\n"
  },
  {
    "path": "experiments/split_tiny_imagenet/lwf.py",
    "content": "import avalanche as avl\nimport torch\nfrom torch.nn import CrossEntropyLoss\nfrom torch.optim import SGD\nfrom avalanche.evaluation import metrics as metrics\nfrom models import MultiHeadVGGSmall\nfrom experiments.utils import set_seed, create_default_args\n\n\ndef lwf_stinyimagenet(override_args=None):\n    \"\"\"\n    \"Learning without Forgetting\" by Li et. al. (2016).\n    http://arxiv.org/abs/1606.09282\n    Since experimental setup of the paper is quite outdated and not\n    easily reproducible, this experiment is based on\n    \"A continual learning survey: Defying forgetting in classification tasks\"\n    De Lange et. al. (2021).\n    https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=9349197\n\n    We use a VGG network, which leads a lower performance than the one from\n    De Lange et. al. (2021).\n    \"\"\"\n    args = create_default_args({'cuda': 0,\n                                'lwf_alpha': 1, 'lwf_temperature': 2, 'epochs': 70,\n                                'learning_rate': 1e-3, 'train_mb_size': 200, 'seed': None,\n                                'dataset_root': None}, override_args)\n    set_seed(args.seed)\n    device = torch.device(f\"cuda:{args.cuda}\"\n                          if torch.cuda.is_available() and\n                          args.cuda >= 0 else \"cpu\")\n\n    benchmark = avl.benchmarks.SplitTinyImageNet(\n        10, return_task_id=True, dataset_root=args.dataset_root)\n    model = MultiHeadVGGSmall(n_classes=200)\n    criterion = CrossEntropyLoss()\n\n    interactive_logger = avl.logging.InteractiveLogger()\n\n    evaluation_plugin = avl.training.plugins.EvaluationPlugin(\n        metrics.accuracy_metrics(epoch=True, experience=True, stream=True),\n        loggers=[interactive_logger])\n\n    cl_strategy = avl.training.LwF(\n        model,\n        SGD(model.parameters(), lr=args.learning_rate, momentum=0.9),\n        criterion,\n        alpha=args.lwf_alpha, temperature=args.lwf_temperature,\n        
train_mb_size=args.train_mb_size, train_epochs=args.epochs, eval_mb_size=128,\n        device=device, evaluator=evaluation_plugin)\n\n    res = None\n    for experience in benchmark.train_stream:\n        cl_strategy.train(experience)\n        res = cl_strategy.eval(benchmark.test_stream)\n\n    return res\n\n\nif __name__ == \"__main__\":\n    res = lwf_stinyimagenet()\n    print(res)\n"
  },
  {
    "path": "experiments/split_tiny_imagenet/mas.py",
    "content": "import torch\nfrom torch.nn import CrossEntropyLoss\n\nfrom avalanche.evaluation.metrics import (\n    accuracy_metrics,\n    forgetting_metrics,\n    loss_metrics\n)\nfrom avalanche.training.plugins import EvaluationPlugin\nfrom models import MultiHeadVGGSmall\nfrom experiments.utils import set_seed, create_default_args\nimport avalanche as avl\n\n\ndef mas_stinyimagenet(override_args=None):\n    \"\"\"\n    Experiment adapted by\n    \"A continual learning survey: Defying forgetting in classification tasks\"\n    by De Lange et al.\n    https://doi.org/10.1109/TPAMI.2021.3057446\n    \"\"\"\n    args = create_default_args(\n        {'cuda': 0, 'lambda_reg': 1., 'alpha': 0.5,\n         'verbose': True, 'learning_rate': 0.001,\n         'train_mb_size': 200, 'epochs': 50, 'seed': None,\n         'dataset_root': None}, override_args)\n\n    set_seed(args.seed)\n    device = torch.device(f\"cuda:{args.cuda}\"\n                          if torch.cuda.is_available() and\n                          args.cuda >= 0 else \"cpu\")\n\n    \"\"\"\n    \"In order to construct a balanced dataset, we assign an equal amount of\n    20 randomly chosen classes to each task in a sequence of 10 consecutive\n    tasks. 
This task incremental setting allows using an oracle at test\n    time for our evaluation per task, ensuring all tasks are roughly\n    similar in terms of difficulty, size, and distribution, making the\n    interpretation of the results easier.\"\n    \"\"\"\n    benchmark = avl.benchmarks.SplitTinyImageNet(\n        10, return_task_id=True, dataset_root=args.dataset_root)\n    model = MultiHeadVGGSmall()\n    criterion = CrossEntropyLoss()\n\n    interactive_logger = avl.logging.InteractiveLogger()\n\n    evaluation_plugin = EvaluationPlugin(\n        accuracy_metrics(\n            epoch=True, experience=True, stream=True\n        ),\n        loss_metrics(\n            epoch=True, experience=True, stream=True\n        ),\n        forgetting_metrics(\n            experience=True, stream=True\n        ),\n        loggers=[interactive_logger])\n\n    optimizer = torch.optim.SGD(model.parameters(), lr=args.learning_rate, momentum=0.9)\n    cl_strategy = avl.training.MAS(\n        model,\n        optimizer,\n        criterion, lambda_reg=args.lambda_reg, alpha=args.alpha,\n        verbose=args.verbose, train_mb_size=args.train_mb_size,\n        train_epochs=args.epochs, eval_mb_size=128, device=device,\n        evaluator=evaluation_plugin)\n\n    res = None\n    for experience in benchmark.train_stream:\n        cl_strategy.train(experience)\n        res = cl_strategy.eval(benchmark.test_stream)\n\n    return res\n\n\nif __name__ == \"__main__\":\n    res = mas_stinyimagenet()\n    print(res)\n"
  },
  {
    "path": "experiments/split_tiny_imagenet/naive.py",
    "content": "import avalanche as avl\nimport torch\nfrom torch.nn import CrossEntropyLoss\nfrom torch.optim import Adam\nfrom avalanche.evaluation import metrics as metrics\nfrom models import MultiHeadVGGSmall\nfrom experiments.utils import set_seed, create_default_args\n\n\ndef naive_stinyimagenet(override_args=None):\n    \"\"\"\n    \"Learning without Forgetting\" by Li et. al. (2016).\n    http://arxiv.org/abs/1606.09282\n    Since experimental setup of the paper is quite outdated and not\n    easily reproducible, this experiment is based on\n    \"A continual learning survey: Defying forgetting in classification tasks\"\n    De Lange et. al. (2021).\n    https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=9349197\n\n    We use a VGG network, which leads a lower performance than the one from\n    De Lange et. al. (2021).\n    \"\"\"\n    args = create_default_args({'cuda': 0, 'epochs': 30,\n                                'learning_rate': 1e-3, 'train_mb_size': 200, 'seed': None,\n                                'dataset_root': None}, override_args)\n    set_seed(args.seed)\n    device = torch.device(f\"cuda:{args.cuda}\"\n                          if torch.cuda.is_available() and\n                          args.cuda >= 0 else \"cpu\")\n\n    benchmark = avl.benchmarks.SplitTinyImageNet(\n        10, return_task_id=True, dataset_root=args.dataset_root)\n    model = MultiHeadVGGSmall(n_classes=200)\n    criterion = CrossEntropyLoss()\n\n    interactive_logger = avl.logging.InteractiveLogger()\n\n    evaluation_plugin = avl.training.plugins.EvaluationPlugin(\n        metrics.accuracy_metrics(epoch=True, experience=True, stream=True),\n        loggers=[interactive_logger])\n\n    cl_strategy = avl.training.Naive(\n        model,\n        Adam(model.parameters(), lr=args.learning_rate),\n        criterion,\n        train_mb_size=args.train_mb_size, train_epochs=args.epochs, eval_mb_size=128,\n        device=device, evaluator=evaluation_plugin)\n\n    res = 
None\n    for experience in benchmark.train_stream:\n        cl_strategy.train(experience)\n        res = cl_strategy.eval(benchmark.test_stream)\n\n    return res\n\n\nif __name__ == \"__main__\":\n    res = naive_stinyimagenet()\n    print(res)\n"
  },
  {
    "path": "experiments/split_tiny_imagenet/packnet.py",
    "content": "\"\"\"\nReproduce Delange et al. (2021) benchmark results for PackNet\n(Mallya & Lazebnik, 2018) on Split Tiny ImageNet\n\nDelange, M., Aljundi, R., Masana, M., Parisot, S., Jia, X., Leonardis, A., \n  Slabaugh, G., & Tuytelaars, T. (2021). A continual learning survey: Defying \n  forgetting in classification tasks. IEEE Transactions on Pattern Analysis \n  and Machine Intelligence, 1–1. https://doi.org/10.1109/TPAMI.2021.3057446\n\nMallya, A., & Lazebnik, S. (2018). PackNet: Adding Multiple Tasks to a Single\n  Network by Iterative Pruning. 2018 IEEE/CVF Conference on Computer Vision\n  and Pattern Recognition, 7765–7773. https://doi.org/10.1109/CVPR.2018.00810\n\"\"\"\n\nimport avalanche as avl\nimport torch\nfrom torch.nn import CrossEntropyLoss\nfrom torch.optim import Adam\nfrom avalanche.evaluation import metrics as metrics\nfrom experiments.utils import set_seed, create_default_args\nfrom models.vgg import SingleHeadVGGSmall\nfrom avalanche.models.packnet import PackNetModel, packnet_simple_mlp\nfrom avalanche.training.supervised.strategy_wrappers import PackNet\n\n\ndef packnet_stinyimagenet(override_args=None):\n    \"\"\"\n    The original PackNet paper uses an unusual experimental setup, so we\n    base this experiment on Delange et al. (2021) benchmark.\n\n    Delange et al. (2021) set prune_proportion using the Continual\n    Hyperparameter Selection Framework. 
We instead use `prune_proportion` such\n    that the number of parameters in each task-specific-subset of the model\n    are roughly equal.\n    \"\"\"\n    args = create_default_args(\n        {\n            \"cuda\": 0,\n            \"epochs\": 30,\n            \"learning_rate\": 1e-3,\n            \"train_mb_size\": 200,\n            \"seed\": 42,\n            \"dataset_root\": None,\n            \"prune_proportion\": [\n                0.90,\n                0.88,\n                0.87,\n                0.85,\n                0.83,\n                0.80,\n                0.75,\n                0.66,\n                0.50,\n                0.00,\n            ],\n            \"post_prune_epochs\": 15,\n        },\n        override_args,\n    )\n    set_seed(args.seed)\n    device = torch.device(\n        f\"cuda:{args.cuda}\" if torch.cuda.is_available() and args.cuda >= 0 else \"cpu\"\n    )\n\n    benchmark = avl.benchmarks.SplitTinyImageNet(\n        10, return_task_id=True, dataset_root=args.dataset_root\n    )\n    model = SingleHeadVGGSmall(n_classes=200)\n    model = PackNetModel(model)\n    criterion = CrossEntropyLoss()\n\n    interactive_logger = avl.logging.InteractiveLogger()\n\n    evaluation_plugin = avl.training.plugins.EvaluationPlugin(\n        metrics.accuracy_metrics(epoch=True, experience=True, stream=True),\n        loggers=[interactive_logger],\n    )\n\n    cl_strategy = PackNet(\n        model,\n        Adam(model.parameters(), lr=args.learning_rate),\n        post_prune_epochs=args.post_prune_epochs,\n        prune_proportion=args.prune_proportion,\n        criterion=criterion,\n        train_mb_size=args.train_mb_size,\n        train_epochs=args.epochs,\n        eval_mb_size=128,\n        device=device,\n        evaluator=evaluation_plugin,\n    )\n\n    res = None\n    for experience in benchmark.train_stream:\n        cl_strategy.train(experience)\n        res = cl_strategy.eval(benchmark.test_stream)\n\n    return res\n\n\nif __name__ 
== \"__main__\":\n    res = packnet_stinyimagenet()\n    print(res)\n"
  },
  {
    "path": "experiments/utils.py",
    "content": "import random\nfrom types import SimpleNamespace\n\nimport numpy as np\nimport torch\n\nfrom avalanche.benchmarks import dataset_benchmark\n\n\ndef set_seed(seed):\n    if seed is None:\n        return\n    torch.manual_seed(seed)\n    torch.cuda.manual_seed(seed)\n    np.random.seed(seed)\n    random.seed(seed)\n    if torch.cuda.is_available():\n        torch.backends.cudnn.deterministic = True\n        torch.backends.cudnn.enabled = True\n        torch.backends.cudnn.benchmark = False\n\n\ndef create_default_args(args_dict, additional_args=None):\n    args = SimpleNamespace()\n    for k, v in args_dict.items():\n        args.__dict__[k] = v\n    if additional_args is not None:\n        for k, v in additional_args.items():\n            args.__dict__[k] = v\n    return args\n\n\ndef restrict_dataset_size(scenario, size: int):\n    \"\"\"\n    Util used to restrict the size of the datasets coming from a scenario\n    param: size: size of the reduced training dataset\n    \"\"\"\n    modified_train_ds = []\n    modified_test_ds = []\n    modified_valid_ds = []\n\n    if hasattr(scenario, \"valid_stream\"):\n        valid_list = list(scenario.valid_stream)\n\n    for i, train_ds in enumerate(scenario.train_stream):\n        train_ds_idx, _ = torch.utils.data.random_split(\n            torch.arange(len(train_ds.dataset)),\n            (size, len(train_ds.dataset) - size),\n        )\n        dataset = train_ds.dataset.subset(train_ds_idx)\n\n        modified_train_ds.append(dataset)\n        modified_test_ds.append(scenario.test_stream[i].dataset)\n        if hasattr(scenario, \"valid_stream\"):\n            modified_valid_ds.append(valid_list[i].dataset)\n\n    scenario = dataset_benchmark(\n        modified_train_ds,\n        modified_test_ds,\n        other_streams_datasets={\"valid\": modified_valid_ds}\n        if len(modified_valid_ds) > 0\n        else None,\n    )\n\n    return scenario\n"
  },
  {
    "path": "gitbisect_test.sh",
    "content": "#!/bin/bash\n# first mandatory argument with path to continual-learning-baselines repository\n# second optional argument with the test name to run. If not provided, all tests will be run\n\ncd $1\nif [ $# -ne 2 ]; then\n    python -m unittest\nelse\n    python -m unittest $2\nfi\n\nresult=$?\nif [ $? -ne 0 ]; then\n    exit 1 # 1 -> bad\nfi\nexit 0 # 0 -> good\n"
  },
  {
    "path": "models/__init__.py",
    "content": "from .models import *\nfrom .reduced_resnet18 import *\nfrom .vgg import MultiHeadVGG, MultiHeadVGGSmall, SingleHeadVGGSmall, VGGSmall\n"
  },
  {
    "path": "models/models.py",
    "content": "import avalanche.models\nfrom avalanche.models import MultiHeadClassifier, MultiTaskModule, BaseModel\nfrom torch import nn\n\n\nclass MultiHeadMLP(MultiTaskModule):\n    def __init__(self, input_size=28 * 28, hidden_size=256, hidden_layers=2,\n                 drop_rate=0, relu_act=True):\n        super().__init__()\n        self._input_size = input_size\n\n        layers = nn.Sequential(*(nn.Linear(input_size, hidden_size),\n                                 nn.ReLU(inplace=True) if relu_act else nn.Tanh(),\n                                 nn.Dropout(p=drop_rate)))\n        for layer_idx in range(hidden_layers - 1):\n            layers.add_module(\n                f\"fc{layer_idx + 1}\", nn.Sequential(\n                    *(nn.Linear(hidden_size, hidden_size),\n                      nn.ReLU(inplace=True) if relu_act else nn.Tanh(),\n                      nn.Dropout(p=drop_rate))))\n\n        self.features = nn.Sequential(*layers)\n        self.classifier = MultiHeadClassifier(hidden_size)\n\n    def forward(self, x, task_labels):\n        x = x.contiguous()\n        x = x.view(x.size(0), self._input_size)\n        x = self.features(x)\n        x = self.classifier(x, task_labels)\n        return x\n\n\nclass MLP(nn.Module, BaseModel):\n    def __init__(self, input_size=28 * 28, hidden_size=256, hidden_layers=2,\n                 output_size=10, drop_rate=0, relu_act=True, initial_out_features=0):\n        \"\"\"\n        :param initial_out_features: if >0 override output size and build an\n            IncrementalClassifier with `initial_out_features` units as first.\n        \"\"\"\n        super().__init__()\n        self._input_size = input_size\n\n        layers = nn.Sequential(*(nn.Linear(input_size, hidden_size),\n                                 nn.ReLU(inplace=True) if relu_act else nn.Tanh(),\n                                 nn.Dropout(p=drop_rate)))\n        for layer_idx in range(hidden_layers - 1):\n            layers.add_module(\n     
           f\"fc{layer_idx + 1}\", nn.Sequential(\n                    *(nn.Linear(hidden_size, hidden_size),\n                      nn.ReLU(inplace=True) if relu_act else nn.Tanh(),\n                      nn.Dropout(p=drop_rate))))\n\n        self.features = nn.Sequential(*layers)\n\n        if initial_out_features > 0:\n            self.classifier = avalanche.models.IncrementalClassifier(in_features=hidden_size,\n                                                                     initial_out_features=initial_out_features)\n        else:\n            self.classifier = nn.Linear(hidden_size, output_size)\n\n    def forward(self, x):\n        x = x.contiguous()\n        x = x.view(x.size(0), self._input_size)\n        x = self.features(x)\n        x = self.classifier(x)\n        return x\n\n    def get_features(self, x):\n        x = x.contiguous()\n        x = x.view(x.size(0), self._input_size)\n        return self.features(x)\n\n\nclass SI_CNN(MultiTaskModule):\n    def __init__(self, hidden_size=512):\n        super().__init__()\n        layers = nn.Sequential(*(nn.Conv2d(in_channels=3, out_channels=32, kernel_size=(3, 3), padding=(1, 1)),\n                                 nn.ReLU(inplace=True),\n                                 nn.Conv2d(in_channels=32, out_channels=32, kernel_size=(3, 3)),\n                                 nn.ReLU(inplace=True),\n                                 nn.MaxPool2d((2, 2)),\n                                 nn.Dropout(p=0.25),\n                                 nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(3, 3), padding=(1, 1)),\n                                 nn.ReLU(inplace=True),\n                                 nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3, 3)),\n                                 nn.ReLU(inplace=True),\n                                 nn.MaxPool2d((2, 2)),\n                                 nn.Dropout(p=0.25),\n                                 nn.Flatten(),\n                               
  nn.Linear(2304, hidden_size),\n                                 nn.ReLU(inplace=True),\n                                 nn.Dropout(p=0.5)\n                                 ))\n        self.features = nn.Sequential(*layers)\n        self.classifier = MultiHeadClassifier(hidden_size, initial_out_features=10)\n\n    def forward(self, x, task_labels):\n        x = self.features(x)\n        x = self.classifier(x, task_labels)\n        return x\n\n\nclass FlattenP(nn.Module):\n    '''A nn-module to flatten a multi-dimensional tensor to 2-dim tensor.'''\n\n    def forward(self, x):\n        batch_size = x.size(0)   # first dimenstion should be batch-dimension.\n        return x.view(batch_size, -1)\n\n    def __repr__(self):\n        tmpstr = self.__class__.__name__ + '()'\n        return tmpstr\n\n\nclass MLP_gss(nn.Module):\n    def __init__(self, sizes, bias=True):\n        super(MLP_gss, self).__init__()\n        layers = []\n\n        for i in range(0, len(sizes) - 1):\n            if i < (len(sizes)-2):\n                layers.append(nn.Linear(sizes[i], sizes[i + 1]))\n                layers.append(nn.ReLU())\n            else:\n                layers.append(nn.Linear(sizes[i], sizes[i + 1], bias=bias))\n\n        self.net = nn.Sequential(FlattenP(), *layers)\n\n    def forward(self, x):\n        return self.net(x)\n\n\n__all__ = ['MultiHeadMLP', 'MLP', 'SI_CNN', 'MLP_gss']\n"
  },
  {
    "path": "models/models_lamaml.py",
    "content": "import torch.nn as nn\nfrom avalanche.models.dynamic_modules import MultiTaskModule,\\\n    MultiHeadClassifier\n\n\n####################\n#     CIFAR-100\n####################\n\nclass ConvCIFAR(nn.Module):\n    def __init__(self, num_classes=10):\n        super(ConvCIFAR, self).__init__()\n        # Convolutional layers\n        self.conv_layers = nn.Sequential(\n            nn.Conv2d(3, 160, kernel_size=(3, 3), stride=2, padding=1),\n            nn.ReLU(inplace=True),\n            nn.Conv2d(160, 160, kernel_size=(3, 3), stride=2, padding=1),\n            nn.ReLU(inplace=True),\n            nn.Conv2d(160, 160, kernel_size=(3, 3), stride=2, padding=1),\n            nn.ReLU(inplace=True),\n        )\n        # Linear layers\n        self.relu = nn.ReLU(inplace=True)\n        self.linear1 = nn.Linear(16*160, 320)\n        self.linear2 = nn.Linear(320, 320)\n        # Classifier\n        self.classifier = nn.Linear(320, num_classes)\n\n    def forward(self, x):\n        x = self.conv_layers(x)\n        x = x.view(-1, 2560)\n        x = self.relu(self.linear1(x))\n        x = self.relu(self.linear2(x))\n\n        x = self.classifier(x)\n\n        return x\n\n\nclass MTConvCIFAR(ConvCIFAR, MultiTaskModule):\n    def __init__(self):\n        super(MTConvCIFAR, self).__init__()\n        # Classifier\n        self.classifier = MultiHeadClassifier(320)\n\n    def forward(self, x, task_labels):\n        x = self.conv_layers(x)\n        x = x.view(-1, 16*160)\n        x = self.relu(self.linear1(x))\n        x = self.relu(self.linear2(x))\n        x = self.classifier(x, task_labels)\n\n        return x\n\n\n####################\n#   TinyImageNet\n####################\n\nclass ConvTinyImageNet(nn.Module):\n    def __init__(self, num_classes=10):\n        super(ConvTinyImageNet, self).__init__()\n        # Convolutional layers\n        self.conv_layers = nn.Sequential(\n            nn.Conv2d(3, 160, kernel_size=(3, 3), stride=2, padding=1),\n            
nn.ReLU(inplace=True),\n            nn.Conv2d(160, 160, kernel_size=(3, 3), stride=2, padding=1),\n            nn.ReLU(inplace=True),\n            nn.Conv2d(160, 160, kernel_size=(3, 3), stride=2, padding=1),\n            nn.ReLU(inplace=True),\n            nn.Conv2d(160, 160, kernel_size=(3, 3), stride=2, padding=1),\n            nn.ReLU(inplace=True),\n        )\n        # linear layers\n        self.relu = nn.ReLU(inplace=True)\n        self.linear1 = nn.Linear(16*160, 640)\n        self.linear2 = nn.Linear(640, 640)\n        # classifier\n        self.classifier = nn.Linear(640, num_classes)\n\n    def forward(self, x):\n        x = self.conv_layers(x)\n        x = x.view(-1, 16*160)\n        x = self.relu(self.linear1(x))\n        x = self.relu(self.linear2(x))\n        x = self.classifier(x)\n\n        return x\n\n\nclass MTConvTinyImageNet(ConvTinyImageNet, MultiTaskModule):\n    def __init__(self):\n        super(MTConvTinyImageNet, self).__init__()\n        # Classifier\n        self.classifier = MultiHeadClassifier(640)\n\n    def forward(self, x, task_labels):\n        x = self.conv_layers(x)\n        x = x.view(-1, 16*160)\n        x = self.relu(self.linear1(x))\n        x = self.relu(self.linear2(x))\n        x = self.classifier(x, task_labels)\n\n        return x\n"
  },
  {
    "path": "models/reduced_resnet18.py",
    "content": "import torch\nfrom avalanche.models import MultiHeadClassifier, MultiTaskModule\nfrom torch import nn, relu\nfrom torch.nn.functional import avg_pool2d\n\n\"\"\"\nSTART: FROM GEM CODE https://github.com/facebookresearch/GradientEpisodicMemory/\nCLASSIFIER REMOVED AND SUBSTITUTED WITH AVALANCHE MULTI-HEAD CLASSIFIER\n\"\"\"\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n    return nn.Conv2d(\n        in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=True\n    )\n\n\nclass BasicBlock(nn.Module):\n    expansion = 1\n\n    def __init__(self, in_planes, planes, stride=1):\n        super(BasicBlock, self).__init__()\n        self.conv1 = conv3x3(in_planes, planes, stride)\n        self.bn1 = nn.BatchNorm2d(planes)\n        self.conv2 = conv3x3(planes, planes)\n        self.bn2 = nn.BatchNorm2d(planes)\n\n        self.shortcut = nn.Sequential()\n        if stride != 1 or in_planes != self.expansion * planes:\n            self.shortcut = nn.Sequential(\n                nn.Conv2d(\n                    in_planes,\n                    self.expansion * planes,\n                    kernel_size=1,\n                    stride=stride,\n                    bias=True,\n                ),\n                nn.BatchNorm2d(self.expansion * planes),\n            )\n\n    def forward(self, x):\n        out = relu(self.bn1(self.conv1(x)))\n        out = self.bn2(self.conv2(out))\n        out += self.shortcut(x)\n        out = relu(out)\n        return out\n\n\nclass ResNet(nn.Module):\n    def __init__(self, block, num_blocks, nf):\n        super(ResNet, self).__init__()\n        self.in_planes = nf\n\n        self.conv1 = conv3x3(3, nf * 1)\n        self.bn1 = nn.BatchNorm2d(nf * 1)\n        self.layer1 = self._make_layer(block, nf * 1, num_blocks[0], stride=1)\n        self.layer2 = self._make_layer(block, nf * 2, num_blocks[1], stride=2)\n        self.layer3 = self._make_layer(block, nf * 4, num_blocks[2], stride=2)\n        self.layer4 = 
self._make_layer(block, nf * 8, num_blocks[3], stride=2)\n\n    def _make_layer(self, block, planes, num_blocks, stride):\n        strides = [stride] + [1] * (num_blocks - 1)\n        layers = []\n        for stride in strides:\n            layers.append(block(self.in_planes, planes, stride))\n            self.in_planes = planes * block.expansion\n        return nn.Sequential(*layers)\n\n    def forward(self, x):\n        bsz = x.size(0)\n        out = relu(self.bn1(self.conv1(x.view(bsz, 3, 32, 32))))\n        out = self.layer1(out)\n        out = self.layer2(out)\n        out = self.layer3(out)\n        out = self.layer4(out)\n        out = avg_pool2d(out, 4)\n        return out\n\n\n\"\"\"\nEND: FROM GEM CODE\n\"\"\"\n\nclass MultiHeadReducedResNet18(MultiTaskModule):\n    \"\"\"\n    As from GEM paper, a smaller version of ResNet18, with three times less feature maps across all layers.\n    It employs multi-head output layer.\n    \"\"\"\n\n    def __init__(self, size_before_classifier=160):\n        super().__init__()\n        self.resnet = ResNet(BasicBlock, [2, 2, 2, 2], 20)\n        self.classifier = MultiHeadClassifier(size_before_classifier)\n\n    def forward(self, x, task_labels):\n        out = self.resnet(x)\n        out = out.view(out.size(0), -1)\n        return self.classifier(out, task_labels)\n\n\nclass SingleHeadReducedResNet18(torch.nn.Module):\n    def __init__(self, num_classes):\n        super().__init__()\n        self.resnet = ResNet(BasicBlock, [2, 2, 2, 2], 20)\n        self.classifier = nn.Linear(160, num_classes)\n\n    def feature_extractor(self, x):\n        out = self.resnet(x)\n        return out.view(out.size(0), -1)\n\n    def forward(self, x):\n        out = self.feature_extractor(x)\n        return self.classifier(out)\n\n\n__all__ = ['MultiHeadReducedResNet18', 'SingleHeadReducedResNet18']\n"
  },
  {
    "path": "models/vgg.py",
    "content": "import torch\nfrom torch import nn\nimport torchvision\nfrom avalanche.models import MultiTaskModule, MultiHeadClassifier\n\n\nclass MultiHeadVGG(MultiTaskModule):\n    def __init__(self, n_classes=20):\n        super().__init__()\n        self.vgg = torchvision.models.vgg11()\n        self.classifier = MultiHeadClassifier(in_features=1000, initial_out_features=n_classes)\n\n    def forward(self, x, task_labels):\n        x = self.vgg(x)\n        x = torch.flatten(x, 1)\n        return self.classifier(x, task_labels)\n\n\n\"\"\"\nSmall VGG net adapted from https://github.com/Mattdl/CLsurvey/\n\"\"\"\n\ncfg = [64, 'M', 64, 'M', 64, 64, 'M', 128, 128, 'M']\nconv_kernel_size = 3\nimg_input_channels = 3\n\n\nclass VGGSmall(torchvision.models.VGG):\n    \"\"\"\n    Creates VGG feature extractor from config and custom classifier.\n    \"\"\"\n\n    def __init__(self):\n\n        in_channels = img_input_channels\n        layers = []\n        for v in cfg:\n            if v == 'M':\n                layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n            else:\n                conv2d = nn.Conv2d(in_channels, v, kernel_size=conv_kernel_size, padding=1)\n                layers += [conv2d, nn.ReLU(inplace=True)]\n                in_channels = v\n\n        super(VGGSmall, self).__init__(nn.Sequential(*layers), init_weights=True)\n\n        if hasattr(self, 'avgpool'):  # Compat Pytorch>1.0.0\n            self.avgpool = torch.nn.Identity()\n\n        del self.classifier\n\n    def forward(self, x):\n        x = self.features(x)\n        return x\n\n\nclass MultiHeadVGGSmall(MultiTaskModule):\n    def __init__(self, n_classes=200, hidden_size=128):\n        super().__init__()\n        self.vgg = VGGSmall()\n        self.feedforward = nn.Sequential(\n            nn.Linear(128*4*4, hidden_size),\n            nn.ReLU(True),\n            nn.Linear(hidden_size, hidden_size),\n            nn.ReLU(True),\n        )\n        self.classifier = 
MultiHeadClassifier(in_features=128,\n                                              initial_out_features=n_classes)\n\n    def forward(self, x, task_labels):\n        x = self.vgg(x)\n        x = torch.flatten(x, 1)\n        x = self.feedforward(x)\n        return self.classifier(x, task_labels)\n\n\nclass SingleHeadVGGSmall(nn.Module):\n    def __init__(self, n_classes=200, hidden_size=128):\n        super().__init__()\n        self.vgg = VGGSmall()\n        self.feedforward = nn.Sequential(\n            nn.Linear(128 * 4 * 4, hidden_size),\n            nn.ReLU(True),\n            nn.Linear(hidden_size, hidden_size),\n            nn.ReLU(True),\n        )\n        self.classifier = nn.Linear(hidden_size, n_classes)\n\n    def forward(self, x):\n        x = self.vgg(x)\n        x = torch.flatten(x, 1)\n        x = self.feedforward(x)\n        return self.classifier(x)\n"
  },
  {
    "path": "tests/__init__.py",
    "content": "from .synaptic_intelligence import SynapticIntelligence\nfrom .cope import COPE\nfrom .dslda import DSLDA\nfrom .ewc import EWC\nfrom .mas import MAS\nfrom .agem import AGEM\nfrom .gem import GEM\nfrom .lwf import LwF\nfrom .gss import GSS\nfrom .mir import MIR\nfrom .iCARL import iCARL\nfrom .gdumb import GDumb\nfrom .lfl import LFL\nfrom .lamaml import LaMAML\nfrom .generative_replay import GenerativeReplay\nfrom .rwalk import RWalk\nfrom .scr import SCR\nfrom .er_ace import ER_ACE\nfrom .er_aml import ER_AML\nfrom .packnet import PackNet\nfrom . import utils\n"
  },
  {
    "path": "tests/agem/__init__.py",
    "content": "from .experiment import AGEM\n"
  },
  {
    "path": "tests/agem/experiment.py",
    "content": "import unittest\nfrom tests.utils import get_average_metric, get_target_result\nfrom experiments.permuted_mnist import agem_pmnist\nfrom experiments.split_cifar100 import agem_scifar100\n\n\nclass AGEM(unittest.TestCase):\n    \"\"\"\n    Reproducing Average-GEM experiments from paper\n    \"Efficient Lifelong Learning with A-GEM\" by Chaudhry et. al. (2019).\n    https://openreview.net/pdf?id=Hkf2_sC5FX\n    The main difference with the original paper is that we do not append any task descriptor\n    to the model input.\n    We train on the last 17 experiences since we apply the evaluation protocol defined\n    in the paper but we do not perform model selection.\n    \"\"\"\n    def test_pmnist(self):\n        \"\"\"Permuted MNIST benchmark\"\"\"\n        res = agem_pmnist({'seed': 0})\n        avg_stream_acc = get_average_metric(res)\n        print(f\"AGEM-PMNIST Average Stream Accuracy: {avg_stream_acc:.2f}\")\n\n        target_acc = float(get_target_result('agem', 'pmnist'))\n        if target_acc > avg_stream_acc:\n            self.assertAlmostEqual(target_acc, avg_stream_acc, delta=0.03)\n\n    def test_scifar100(self):\n        \"\"\"Split CIFAR-100 benchmark\"\"\"\n        res = agem_scifar100({'seed': 0})\n        avg_stream_acc = get_average_metric(res)\n        print(f\"AGEM-SCIFAR100 Average Stream Accuracy: {avg_stream_acc:.2f}\")\n\n        target_acc = float(get_target_result('agem', 'scifar100'))\n        if target_acc > avg_stream_acc:\n            self.assertAlmostEqual(target_acc, avg_stream_acc, delta=0.04)\n"
  },
  {
    "path": "tests/cope/__init__.py",
    "content": "from .experiment import COPE\n"
  },
  {
    "path": "tests/cope/experiment.py",
    "content": "import unittest\nfrom tests.utils import get_average_metric, get_target_result\nfrom experiments.split_mnist import cope_smnist\n\n\nclass COPE(unittest.TestCase):\n    \"\"\"\n    Reproducing CoPE experiments from the paper\n    \"Continual prototype evolution: Learning online from non-stationary data streams\"\n    by De Lange et. al. (2021).\n    https://arxiv.org/abs/2009.00919\n    \"\"\"\n\n    def test_smnist(self):\n        \"\"\"Split MNIST benchmark\"\"\"\n        res = cope_smnist({'seed': 0})\n        avg_stream_acc = get_average_metric(res)\n        print(f\"COPE-SMNIST Average Stream Accuracy: {avg_stream_acc:.2f}\")\n\n        target_acc = float(get_target_result('cope', 'smnist'))\n        if target_acc > avg_stream_acc:\n            self.assertAlmostEqual(target_acc, avg_stream_acc, delta=0.03)\n"
  },
  {
    "path": "tests/dslda/__init__.py",
    "content": "from .experiment import DSLDA\n"
  },
  {
    "path": "tests/dslda/experiment.py",
    "content": "import unittest\nfrom tests.utils import get_average_metric, get_target_result\nfrom experiments.core50 import deep_slda_core50\n\n\nclass DSLDA(unittest.TestCase):\n    \"\"\"\n    Reproducing Streaming Deep LDA experiments from the paper\n    \"Lifelong Machine Learning with Deep Streaming Linear Discriminant Analysis\"\n    by Hayes et. al. (2020).\n    https://arxiv.org/abs/1909.01520\n    \"\"\"\n    def test_core50(self):\n        \"\"\"CORe50 New Classes benchmark\"\"\"\n        res = deep_slda_core50({'seed': 0})\n        avg_stream_acc = get_average_metric(res)\n        print(f\"DSLDA-CORe50 Average Stream Accuracy: {avg_stream_acc:.2f}\")\n\n        target_acc = float(get_target_result('dslda', 'core50'))\n        if target_acc > avg_stream_acc:\n            self.assertAlmostEqual(target_acc, avg_stream_acc, delta=0.03)\n"
  },
  {
    "path": "tests/er_ace/__init__.py",
    "content": "from .experiment import ER_ACE\n"
  },
  {
    "path": "tests/er_ace/experiment.py",
    "content": "import unittest\nfrom tests.utils import get_average_metric, get_target_result\nfrom experiments.split_cifar10 import erace_scifar10\nfrom experiments.split_cifar100 import erace_scifar100\n\n\nclass ER_ACE(unittest.TestCase):\n    \"\"\"\n    Reproducing ER-ACE experiments from paper\n    \"New insights on Reducing Abrupt Representation Change in Online Continual Learning\" \n    by Lucas Caccia et. al \n    https://openreview.net/forum?id=N8MaByOzUfb\n    \"\"\"\n\n    def test_scifar10(self):\n        \"\"\"Split CIFAR-10 benchmark\"\"\"\n        res = erace_scifar10({'seed': 0})\n        avg_stream_acc = get_average_metric(res)\n        print(f\"ER_ACE-SCIFAR10 Average Stream Accuracy: {avg_stream_acc:.2f}\")\n\n        target_acc = float(get_target_result('er_ace', 'scifar10'))\n        if target_acc > avg_stream_acc:\n            self.assertAlmostEqual(target_acc, avg_stream_acc, delta=0.03)\n\n    def test_scifar100(self):\n        \"\"\"Split CIFAR-100 benchmark\"\"\"\n        res = erace_scifar100({'seed': 0})\n        avg_stream_acc = get_average_metric(res)\n        print(f\"ER_ACE-SCIFAR100 Average Stream Accuracy: {avg_stream_acc:.2f}\")\n\n        target_acc = float(get_target_result('er_ace', 'scifar100'))\n        if target_acc > avg_stream_acc:\n            self.assertAlmostEqual(target_acc, avg_stream_acc, delta=0.03)\n"
  },
  {
    "path": "tests/er_aml/__init__.py",
    "content": "from .experiment import ER_AML\n"
  },
  {
    "path": "tests/er_aml/experiment.py",
    "content": "import unittest\nfrom tests.utils import get_average_metric, get_target_result\nfrom experiments.split_cifar10 import eraml_scifar10\nfrom experiments.split_cifar100 import eraml_scifar100\n\n\nclass ER_AML(unittest.TestCase):\n    \"\"\"\n    Reproducing ER-AML experiments from paper\n    \"New insights on Reducing Abrupt Representation Change in Online Continual Learning\"\n    by Lucas Caccia et. al\n    https://openreview.net/forum?id=N8MaByOzUfb\n    \"\"\"\n\n    @unittest.skip(\"ER-AML is not yet in avalanche\")\n    def test_scifar10(self):\n        \"\"\"Split CIFAR-10 benchmark\"\"\"\n        res = eraml_scifar10({\"seed\": 0})\n        avg_stream_acc = get_average_metric(res)\n        print(f\"ER_AML-SCIFAR10 Average Stream Accuracy: {avg_stream_acc:.2f}\")\n\n        target_acc = float(get_target_result(\"er_aml\", \"scifar10\"))\n        if target_acc > avg_stream_acc:\n            self.assertAlmostEqual(target_acc, avg_stream_acc, delta=0.03)\n\n    @unittest.skip(\"ER-AML is not yet in avalanche\")\n    def test_scifar100(self):\n        \"\"\"Split CIFAR-100 benchmark\"\"\"\n        res = eraml_scifar100({\"seed\": 0})\n        avg_stream_acc = get_average_metric(res)\n        print(f\"ER_AML-SCIFAR100 Average Stream Accuracy: {avg_stream_acc:.2f}\")\n\n        target_acc = float(get_target_result(\"er_aml\", \"scifar100\"))\n        if target_acc > avg_stream_acc:\n            self.assertAlmostEqual(target_acc, avg_stream_acc, delta=0.03)\n"
  },
  {
    "path": "tests/ewc/__init__.py",
    "content": "from .experiment import EWC\n"
  },
  {
    "path": "tests/ewc/experiment.py",
    "content": "import unittest\nfrom tests.utils import get_average_metric, get_target_result\nfrom experiments.permuted_mnist import ewc_pmnist\n\n\nclass EWC(unittest.TestCase):\n    \"\"\"\n    Reproducing Elastic Weight Consolidation experiments from paper\n    \"Overcoming catastrophic forgetting in neural networks\" by Kirkpatrick et. al. (2017).\n    https://www.pnas.org/content/114/13/3521\n    \"\"\"\n\n    def test_pmnist(self):\n        \"\"\"Permuted MNIST benchmark\"\"\"\n        res = ewc_pmnist({'seed': 0})\n        avg_stream_acc = get_average_metric(res)\n        print(f\"EWC-PMNIST Average Stream Accuracy: {avg_stream_acc:.2f}\")\n\n        target_acc = float(get_target_result('ewc', 'pmnist'))\n        if target_acc > avg_stream_acc:\n            self.assertAlmostEqual(target_acc, avg_stream_acc, delta=0.03)\n"
  },
  {
    "path": "tests/gdumb/__init__.py",
    "content": "from .experiment import GDumb\n"
  },
  {
    "path": "tests/gdumb/experiment.py",
    "content": "import unittest\nfrom tests.utils import get_average_metric, get_target_result\nfrom experiments.split_mnist import gdumb_smnist\n\n\nclass GDumb(unittest.TestCase):\n    \"\"\"\n    Reproducing GDumb experiments from paper\n    \"GDumb: A Simple Approach that Questions Our Progress in Continual Learning\" by Prabhu et. al. (2020).\n    https://link.springer.com/chapter/10.1007/978-3-030-58536-5_31\n    \"\"\"\n\n    def test_smnist(self):\n        \"\"\"Split MNIST benchmark\"\"\"\n        res = gdumb_smnist({'seed': 0})\n        avg_stream_acc = get_average_metric(res)\n        print(f\"GDumb-SMNIST Average Stream Accuracy: {avg_stream_acc:.2f}\")\n\n        target_acc = float(get_target_result('gdumb', 'smnist'))\n        if target_acc > avg_stream_acc:\n            self.assertAlmostEqual(target_acc, avg_stream_acc, delta=0.03)\n"
  },
  {
    "path": "tests/gem/__init__.py",
    "content": "from .experiment import GEM\n"
  },
  {
    "path": "tests/gem/experiment.py",
    "content": "import unittest\nfrom tests.utils import get_average_metric, get_target_result\nfrom experiments.permuted_mnist import gem_pmnist\nfrom experiments.split_cifar100 import gem_scifar100\n\n\nclass GEM(unittest.TestCase):\n    \"\"\"\n    Reproducing GEM experiments from paper\n    \"Gradient Episodic Memory for Continual Learning\" by Lopez-paz et. al. (2017).\n    https://proceedings.neurips.cc/paper/2017/hash/f87522788a2be2d171666752f97ddebb-Abstract.html\n    \"\"\"\n\n    def test_pmnist(self):\n        \"\"\"Permuted MNIST benchmark\"\"\"\n        res = gem_pmnist({'seed': 0, 'n_exp': 5})\n        avg_stream_acc = get_average_metric(res)\n        print(f\"GEM-PMNIST Average Stream Accuracy: {avg_stream_acc:.2f}\")\n\n        target_acc = float(get_target_result('gem', 'pmnist'))\n        if target_acc > avg_stream_acc:\n            self.assertAlmostEqual(target_acc, avg_stream_acc, delta=0.03)\n\n    def test_scifar100(self):\n        \"\"\"Split CIFAR-100 benchmark\"\"\"\n        res = gem_scifar100({'seed': 435342})\n        avg_stream_acc = get_average_metric(res)\n        print(f\"GEM-SCIFAR100 Average Stream Accuracy: {avg_stream_acc:.2f}\")\n\n        target_acc = float(get_target_result('gem', 'scifar100'))\n        if target_acc > avg_stream_acc:\n            self.assertAlmostEqual(target_acc, avg_stream_acc, delta=0.03)\n"
  },
  {
    "path": "tests/generative_replay/__init__.py",
    "content": "from .experiment import GenerativeReplay\n"
  },
  {
    "path": "tests/generative_replay/experiment.py",
    "content": "import unittest\nfrom tests.utils import get_average_metric, get_target_result\nfrom experiments.split_mnist import generative_replay_smnist\n\n\nclass GenerativeReplay(unittest.TestCase):\n    \"\"\"\n    \"Continual Learning with Deep Generative Replay\" by Shin et. al. (2017).\n    https://arxiv.org/abs/1705.08690\n    \"\"\"\n\n    def test_smnist(self):\n        \"\"\"Split MNIST benchmark\"\"\"\n        res = generative_replay_smnist({'seed': 0})\n        avg_stream_acc = get_average_metric(res)\n        print(f\"GenerativeReplay-SMNIST Average Stream Accuracy: {avg_stream_acc:.2f}\")\n\n        target_acc = float(get_target_result('generative_replay', 'smnist'))\n        if target_acc > avg_stream_acc:\n            self.assertAlmostEqual(target_acc, avg_stream_acc, delta=0.03)\n"
  },
  {
    "path": "tests/gss/__init__.py",
    "content": "from .experiment import GSS\n"
  },
  {
    "path": "tests/gss/experiment.py",
    "content": "import unittest\nfrom tests.utils import get_average_metric, get_target_result\nfrom experiments.split_mnist import gss_smnist\n\n\nclass GSS(unittest.TestCase):\n    \"\"\" GSS experiments from the original paper.\n\n    This example the strategy GSS_greedy on Split MNIST.\n    The final accuracy is around 77.96% (std 3.5)\n\n    reference: https://arxiv.org/abs/1903.08671\n    \"\"\"\n\n    def test_smnist(self):\n        \"\"\"Split MNIST benchmark\"\"\"\n        res = gss_smnist({'seed': 0})\n        avg_stream_acc = get_average_metric(res)\n        print(f\"GSS-Split MNIST Average Stream Accuracy: {avg_stream_acc:.2f}\")\n\n        target_acc = float(get_target_result('gss', 'smnist'))\n        if target_acc > avg_stream_acc:\n            self.assertAlmostEqual(target_acc, avg_stream_acc, delta=0.03)\n"
  },
  {
    "path": "tests/iCARL/__init__.py",
    "content": "from .experiment import iCARL\n"
  },
  {
    "path": "tests/iCARL/experiment.py",
    "content": "import unittest\nfrom tests.utils import get_target_result, get_average_metric\nfrom experiments.split_cifar100 import icarl_scifar100\n\n\nclass iCARL(unittest.TestCase):\n    \"\"\"\n        Reproducing iCaRL experiments from paper\n        \"iCaRL: Incremental Classifier and Representation Learning\",\n        Sylvestre-Alvise Rebuffi, Alexander Kolesnikov, Georg Sperl, Christoph H. Lampert;\n        Proceedings of the IEEE Conference on\n        Computer Vision and Pattern Recognition (CVPR), 2017, pp. 2001-2010\n        https://openaccess.thecvf.com/content_cvpr_2017/html/Rebuffi_iCaRL_Incremental_Classifier_CVPR_2017_paper.html\n    \"\"\"\n\n    def test_scifar100(self):\n        \"\"\"\n            scifar100 with 10 batches\n        \"\"\"\n        res = icarl_scifar100({'seed': 0})\n        acc = get_average_metric(res)\n        target_acc = get_target_result('iCaRL', 'scifar100')\n        print(f\"iCarl SCIFAR-100: ACC: {acc:.5f}\")\n\n        if target_acc > acc:\n            self.assertAlmostEqual(target_acc, acc, delta=0.03)\n"
  },
  {
    "path": "tests/lamaml/__init__.py",
    "content": "from .experiment import LaMAML\n"
  },
  {
    "path": "tests/lamaml/experiment.py",
    "content": "import unittest\nfrom tests.utils import get_target_result, get_average_metric\nfrom experiments.split_cifar100 import lamaml_scifar100\nfrom experiments.split_tiny_imagenet import lamaml_stinyimagenet\n\n\nclass LaMAML(unittest.TestCase):\n    \"\"\"\n        Reproducing LaMAML experiments from paper\n        \"La-MAML: Look-ahead Meta Learning for Continual Learning\",\n        Gunshi Gupta, Karmesh Yadav, Liam Paull;\n        NeurIPS, 2020\n        https://arxiv.org/abs/2007.13904\n    \"\"\"\n\n    def test_scifar100(self):\n        \"\"\"\n            scifar100, multi-pass\n        \"\"\"\n        res = lamaml_scifar100({'seed': 498235})\n        avg_stream_acc = get_average_metric(res)\n        print(f\"LaMAML-SCIFAR100 Average Stream Accuracy: {avg_stream_acc:.2f}\")\n\n        target_acc = float(get_target_result('lamaml', 'scifar100'))\n        if target_acc > avg_stream_acc:\n            self.assertAlmostEqual(target_acc, avg_stream_acc, delta=0.03)\n\n    def test_stinyimagenet(self):\n        \"\"\"\n            stinyimagenet, multi-pass\n        \"\"\"\n        res = lamaml_stinyimagenet({'seed': 0})\n        avg_stream_acc = get_average_metric(res)\n        print(f\"LaMAML-SplitTinyImageNet Average Stream Accuracy: \" + \\\n              f\"{avg_stream_acc:.2f}\")\n\n        target_acc = float(get_target_result('lamaml', 'stiny-imagenet'))\n        if target_acc > avg_stream_acc:\n            self.assertAlmostEqual(target_acc, avg_stream_acc, delta=0.03)\n"
  },
  {
    "path": "tests/lfl/__init__.py",
    "content": "from .experiment import LFL\n"
  },
  {
    "path": "tests/lfl/experiment.py",
    "content": "import unittest\nfrom tests.utils import get_target_result, get_average_metric\nfrom experiments.permuted_mnist import lfl_pmnist\n\n\nclass LFL(unittest.TestCase):\n    \"\"\"\n        Reproducing Less Forgetful Learning experiments\n        \"Less-forgetting Learning in Deep Neural Networks\"\n        Heechul Jung, Jeongwoo Ju, Minju Jung and Junmo Kim;\n        arXiv, 2016, https://arxiv.org/pdf/1607.00122.pdf\n    \"\"\"\n\n    def test_pmnist(self):\n        res = lfl_pmnist({'seed': 0})\n        exps_acc = []\n        for k, v in res.items():\n            if k.startswith('Top1_Acc_Exp'):\n                exps_acc.append(v)\n        target_acc = get_target_result('lfl', 'pmnist')\n        print(f\"LFL-PMNIST Experiences Accuracy: {exps_acc}\")\n\n        # each experience accuracy should be at least target acc\n        for el in exps_acc:\n            if target_acc > el:\n                self.assertAlmostEqual(target_acc, el, delta=0.03)\n"
  },
  {
    "path": "tests/lwf/__init__.py",
    "content": "from .experiment import LwF\n"
  },
  {
    "path": "tests/lwf/experiment.py",
    "content": "import unittest\nfrom tests.utils import get_average_metric, get_target_result\nfrom experiments.split_mnist import lwf_smnist\nfrom experiments.split_tiny_imagenet import lwf_stinyimagenet\n\n\nclass LwF(unittest.TestCase):\n    \"\"\"\n    Reproducing Learning without Forgetting. Original paper is\n    \"Learning without Forgetting\" by Li et. al. (2016).\n    http://arxiv.org/abs/1606.09282\n    Since experimental setup of the paper is quite outdated and not\n    easily reproducible, this class reproduces LwF experiments\n    on Split MNIST from\n    \"Three scenarios for continual learning\" by van de Ven et. al. (2018).\n    https://arxiv.org/pdf/1904.07734.pdf\n    We managed to surpass the performances reported in the paper by slightly\n    changing the model architecture or the training hyperparameters.\n    Experiments on Tiny Image Net are taken from\n    \"A continual learning survey: Defying forgetting in classification tasks\" De Lange et. al. (2021).\n    https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=9349197\n    \"\"\"\n\n    def test_smnist(self):\n        \"\"\"Split MNIST benchmark\"\"\"\n        res = lwf_smnist({'seed': 0})\n        avg_stream_acc = get_average_metric(res)\n        print(f\"LwF-SMNIST Average Stream Accuracy: {avg_stream_acc:.2f}\")\n\n        target_acc = float(get_target_result('lwf', 'smnist'))\n        if target_acc > avg_stream_acc:\n            self.assertAlmostEqual(target_acc, avg_stream_acc, delta=0.01)\n\n    def test_stinyimagenet(self):\n        \"\"\"Split Tiny ImageNet benchmark\"\"\"\n        res = lwf_stinyimagenet({'seed': 0})\n        avg_stream_acc = get_average_metric(res)\n        print(f\"LwF-SplitTinyImageNet Average Stream Accuracy: {avg_stream_acc:.2f}\")\n\n        target_acc = float(get_target_result('lwf', 'stiny-imagenet'))\n        if target_acc > avg_stream_acc:\n            self.assertAlmostEqual(target_acc, avg_stream_acc, delta=0.03)\n"
  },
  {
    "path": "tests/mas/__init__.py",
    "content": "from .experiment import MAS\n"
  },
  {
    "path": "tests/mas/experiment.py",
    "content": "import unittest\nfrom tests.utils import get_average_metric, get_target_result\nfrom experiments.split_tiny_imagenet import mas_stinyimagenet\n\n\nclass MAS(unittest.TestCase):\n    \"\"\"\n    Reproducing Memory Aware Synapses experiments from paper\n    \"A continual learning survey: Defying forgetting in classification tasks\"\n    by De Lange et al.\n    https://doi.org/10.1109/TPAMI.2021.3057446\n    \"\"\"\n\n    def test_stinyimagenet(self):\n        \"\"\"Split Tiny ImageNet benchmark\"\"\"\n        res = mas_stinyimagenet({'seed': 0})\n        avg_stream_acc = get_average_metric(res)\n        print(\"MAS-SplitTinyImageNet Average \"\n              f\"Stream Accuracy: {avg_stream_acc:.2f}\")\n\n        # Recover target from CSV\n        target = get_target_result('mas', 'stiny-imagenet')\n        if isinstance(target, list):\n            target_acc = target[0]\n        else:\n            target_acc = target\n        target_acc = float(target_acc)\n\n        print(f\"The target value was {target_acc:.2f}\")\n\n        # Check if the result is close to the target\n        if target_acc > avg_stream_acc:\n            self.assertAlmostEqual(target_acc, avg_stream_acc, delta=0.03)\n"
  },
  {
    "path": "tests/mir/__init__.py",
    "content": "from .experiment import MIR\n"
  },
  {
    "path": "tests/mir/experiment.py",
    "content": "import unittest\nfrom tests.utils import get_average_metric, get_target_result\nfrom experiments.permuted_mnist import mir_pmnist\nfrom experiments.split_mnist import mir_smnist\nfrom experiments.split_cifar10 import mir_scifar10\n\n\nclass MIR(unittest.TestCase):\n    \"\"\"\n    Reproducing MIR experiments from paper\n    \"Online Continual Learning With Maximally Interfered Retrieval\" by R. Aljundi et. al. (2019)\n    https://papers.nips.cc/paper/2019/file/15825aee15eb335cc13f9b559f166ee8-MetaReview.html\n    \"\"\"\n\n    def test_pmnist(self):\n        \"\"\"Permuted MNIST benchmark\"\"\"\n        res = mir_pmnist({'seed': 0})\n        avg_stream_acc = get_average_metric(res)\n        print(f\"MIR-PMNIST Average Stream Accuracy: {avg_stream_acc:.2f}\")\n\n        target_acc = float(get_target_result('mir', 'pmnist'))\n        if target_acc > avg_stream_acc:\n            self.assertAlmostEqual(target_acc, avg_stream_acc, delta=0.03)\n\n    def test_smnist(self):\n        \"\"\"Split MNIST benchmark\"\"\"\n        res = mir_smnist({'seed': 0})\n        avg_stream_acc = get_average_metric(res)\n        print(f\"MIR-SMNIST Average Stream Accuracy: {avg_stream_acc:.2f}\")\n\n        target_acc = float(get_target_result('mir', 'smnist'))\n        if target_acc > avg_stream_acc:\n            self.assertAlmostEqual(target_acc, avg_stream_acc, delta=0.03)\n\n    def test_scifar10(self):\n        \"\"\"Split CIFAR-10 benchmark\"\"\"\n        res = mir_scifar10({'seed': 0})\n        avg_stream_acc = get_average_metric(res)\n        print(f\"MIR-SCIFAR10 Average Stream Accuracy: {avg_stream_acc:.2f}\")\n\n        target_acc = float(get_target_result('mir', 'scifar10'))\n        if target_acc > avg_stream_acc:\n            self.assertAlmostEqual(target_acc, avg_stream_acc, delta=0.03)\n"
  },
  {
    "path": "tests/packnet/__init__.py",
    "content": "from .experiment import PackNet\n"
  },
  {
    "path": "tests/packnet/experiment.py",
    "content": "import unittest\nfrom tests.utils import get_average_metric, get_target_result\nfrom experiments.split_tiny_imagenet import packnet_stinyimagenet\n\n\nclass PackNet(unittest.TestCase):\n    \"\"\"\n    Reproduce Delange et al. (2021) benchmark results for PackNet\n    (Mallya & Lazebnik, 2018) on Split Tiny ImageNet\n\n    Delange, M., Aljundi, R., Masana, M., Parisot, S., Jia, X., Leonardis, A.,\n      Slabaugh, G., & Tuytelaars, T. (2021). A continual learning survey: Defying\n      forgetting in classification tasks. IEEE Transactions on Pattern Analysis\n      and Machine Intelligence, 1–1. https://doi.org/10.1109/TPAMI.2021.3057446\n\n    Mallya, A., & Lazebnik, S. (2018). PackNet: Adding Multiple Tasks to a Single\n      Network by Iterative Pruning. 2018 IEEE/CVF Conference on Computer Vision\n      and Pattern Recognition, 7765–7773. https://doi.org/10.1109/CVPR.2018.00810\n    \"\"\"\n\n    def test_stinyimagenet(self):\n        res = packnet_stinyimagenet({'seed': 0})\n        avg_stream_acc = get_average_metric(res)\n        print(f\"PackNet-STinyImagenet Average Stream Accuracy: {avg_stream_acc:.2f}\")\n\n        target_acc = float(get_target_result('packnet', 'stiny-imagenet'))\n        if target_acc > avg_stream_acc:\n            self.assertAlmostEqual(target_acc, avg_stream_acc, delta=0.03)\n"
  },
  {
    "path": "tests/rwalk/__init__.py",
    "content": "from .experiment import RWalk\n"
  },
  {
    "path": "tests/rwalk/experiment.py",
    "content": "import unittest\nfrom tests.utils import get_average_metric, get_target_result\nfrom experiments.split_mnist import rwalk_smnist\n\n\nclass RWalk(unittest.TestCase):\n    \"\"\"\n    Reproducing RWalk experiments from paper\n    \"Riemannian Walk for Incremental Learning:\n    Understanding Forgetting and Intransigence\" by Chaudhry et. al. (2018).\n    https://openaccess.thecvf.com/content_ECCV_2018/html/Arslan_Chaudhry__Riemannian_Walk_ECCV_2018_paper.html\n    \"\"\"\n\n    def test_smnist(self):\n        \"\"\"Split MNIST benchmark\"\"\"\n        res = rwalk_smnist({'seed': 0})\n        avg_stream_acc = get_average_metric(res)\n        print(f\"RWALK-SMNIST Average Stream Accuracy: {avg_stream_acc:.2f}\")\n\n        target_acc = float(get_target_result('rwalk', 'smnist'))\n        if target_acc > avg_stream_acc:\n            self.assertAlmostEqual(target_acc, avg_stream_acc, delta=0.01)"
  },
  {
    "path": "tests/scr/__init__.py",
    "content": "from .experiment import SCR\n"
  },
  {
    "path": "tests/scr/experiment.py",
    "content": "import unittest\nfrom tests.utils import get_average_metric, get_target_result\nfrom experiments.split_cifar10 import online_scr_scifar10\n\n\nclass SCR(unittest.TestCase):\n    \"\"\"\n    Reproducing Supervised Contrastive Replay paper\n    \"Supervised Contrastive Replay: Revisiting the Nearest Class Mean Classifier\n    in Online Class-Incremental Continual Learning\" by Mai et. al. (2021).\n    https://arxiv.org/abs/2103.13885\n    \"\"\"\n\n    def test_scifar10(self):\n        \"\"\"Split CIFAR-10 benchmark\"\"\"\n        res = online_scr_scifar10({'seed': 0})\n        avg_stream_acc = get_average_metric(res)\n        print(f\"SCR-SCIFAR10 Average Stream Accuracy: {avg_stream_acc:.2f}\")\n\n        target_acc = float(get_target_result('scr', 'scifar10'))\n        if target_acc > avg_stream_acc:\n            self.assertAlmostEqual(target_acc, avg_stream_acc, delta=0.03)"
  },
  {
    "path": "tests/synaptic_intelligence/__init__.py",
    "content": "from .experiment import SynapticIntelligence\n"
  },
  {
    "path": "tests/synaptic_intelligence/experiment.py",
    "content": "import unittest\nfrom tests.utils import get_average_metric, get_target_result\nfrom experiments.split_mnist import synaptic_intelligence_smnist\nfrom experiments.permuted_mnist import synaptic_intelligence_pmnist\n\n\nclass SynapticIntelligence(unittest.TestCase):\n    \"\"\"\n    Reproducing Synaptic Intelligence experiments from paper\n    \"Continual Learning Through Synaptic Intelligence\" by Zenke et. al. (2017).\n    http://proceedings.mlr.press/v70/zenke17a.html\n    \"\"\"\n\n    def test_smnist(self):\n        \"\"\"Split MNIST benchmark\"\"\"\n        res = synaptic_intelligence_smnist({'seed': 0})\n        avg_stream_acc = get_average_metric(res)\n        print(f\"SI-SMNIST Average Stream Accuracy: {avg_stream_acc:.2f}\")\n\n        target_acc = float(get_target_result('si', 'smnist'))\n        if target_acc > avg_stream_acc:\n            self.assertAlmostEqual(target_acc, avg_stream_acc, delta=0.01)\n\n    def test_pmnist(self):\n        \"\"\"Permuted MNIST benchmark\"\"\"\n        res = synaptic_intelligence_pmnist({'seed': 0})\n        avg_stream_acc = get_average_metric(res)\n        print(f\"SI-PMNIST Average Stream Accuracy: {avg_stream_acc:.2f}\")\n\n        target_acc = float(get_target_result('si', 'pmnist'))\n        if target_acc > avg_stream_acc:\n            self.assertAlmostEqual(target_acc, avg_stream_acc, delta=0.03)\n"
  },
  {
    "path": "tests/target_results.csv",
    "content": "# result column can be either a number (int/float)\n# or a list in the format [number number number ...]\n# use spaces, not commas, to separate elements of the list\nstrategy,benchmark,result\nsi,smnist,0.97\nsi,pmnist,0.83\ncope,smnist,0.93\ndslda,core50,0.79\newc,pmnist,0.83\nagem,pmnist,0.81\nagem,scifar100,0.54\ngem,pmnist,0.93\ngem,scifar100,0.63\nlwf,smnist,0.23\nlwf,stiny-imagenet,0.44\ngss,smnist,0.78\niCaRL,scifar100,0.48\ngdumb,smnist,0.97\nrwalk,smnist,0.99\nmas,stiny-imagenet,0.4\nlfl,pmnist,0.88\nmir,scifar10,0.46\nmir,smnist,0.87\nmir,pmnist,0.80\nlamaml,scifar100,0.70\nlamaml,stiny-imagenet,0.54\ngenerative_replay,smnist,0.75\ner_ace,scifar10,0.45\ner_ace,scifar100,0.24\nreplay, scifar100, 0.32\nonline_replay, scifar100, 0.21\nonline_replay, scifar10, 0.50\nonline_replay, smnist, 0.92\nscr,scifar10,0.36\ner_aml,scifar10,0.47\ner_aml,scifar100,0.24\npacknet,stiny-imagenet,0.46"
  },
  {
    "path": "tests/test_template.py",
    "content": "import unittest\nimport torch\nimport avalanche as avl\nfrom tests.utils import get_average_metric, get_target_result\n\n\n@unittest.skip(\"Just a template, skipping this test.\")  # remove this when implementing the test\nclass StrategyName(unittest.TestCase):\n    def test_benchmarkname(self):\n        pass\n\n        #####################\n        # FILL HERE         #\n        #####################\n        # Create your experiment with Avalanche and put it in the `experiments` folder\n        # in the project root directory\n        # get the final results into the res variable\n        # res = mystrategy_benchmark(args)\n\n        #####################\n        # Process results   #\n        #####################\n        # you may find useful the already imported functions\n        # `get_average_metric` and `get_target_result`\n\n        # example:\n        # acc = get_average_metric(res)\n        # target_acc = float(get_target_result('strategy', 'benchmark'))\n\n        # check that your current result meets the expected result\n\n        # example:\n        # if target_acc > avg_stream_acc:\n        #   self.assertAlmostEqual(target_acc, avg_stream_acc, delta=0.02)\n"
  },
  {
    "path": "tests/utils.py",
    "content": "from pathlib import Path\nimport inspect\nfrom pandas import read_csv\nimport os\nimport tests\n\n\ndef pandas_to_list(input_str):\n    return [float(el) for el in input_str.strip('[] ').split(' ')]\n\n\ndef get_target_result(strat_name: str, bench_name: str):\n    \"\"\"\n    Read the target_results.csv file and retrieve the target performance for\n    the given strategy on the given benchmark.\n    :param strat_name: strategy name as found in the target file\n    :param bench_name: benchmark name as found in the target file\n    :return: target performance (either a float or a list of floats)\n    \"\"\"\n\n    p = os.path.join(Path(inspect.getabsfile(tests)).parent, 'target_results.csv')\n    data = read_csv(p, sep=',', comment='#')\n    target = data[(data['strategy'] == strat_name) & (data['benchmark'] == bench_name)]['result'].values[0]\n    if isinstance(target, str) and target.startswith('[') and target.endswith(']'):\n        target = pandas_to_list(target)\n    else:\n        target = float(target)\n    return target\n\n\ndef get_average_metric(metric_dict: dict, metric_name: str = 'Top1_Acc_Stream'):\n    \"\"\"\n    Compute the average of a metric based on the provided metric name.\n    The average is computed across the instance of the metrics containing the\n    given metric name in the input dictionary.\n    :param metric_dict: dictionary containing metric name as keys and metric value as value.\n        This dictionary is usually returned by the `eval` method of Avalanche strategies.\n    :param metric_name: the metric name (or a part of it), to be used as pattern to filter the dictionary\n    :return: a number representing the average of all the metric containing `metric_name` in their name\n    \"\"\"\n\n    avg_stream_acc = []\n    for k, v in metric_dict.items():\n        if k.startswith(metric_name):\n            avg_stream_acc.append(v)\n    return sum(avg_stream_acc) / float(len(avg_stream_acc))\n"
  }
]