[
  {
    "path": ".gitignore",
    "content": "# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# C extensions\n*.so\n\n# Distribution / packaging\n.Python\nbuild/\ndevelop-eggs/\ndist/\ndownloads/\neggs/\n.eggs/\nlib/\nlib64/\nparts/\nsdist/\nvar/\nwheels/\n*.egg-info/\n.installed.cfg\n*.egg\nMANIFEST\n\n# PyInstaller\n#  Usually these files are written by a python script from a template\n#  before PyInstaller builds the exe, so as to inject date/other infos into it.\n*.manifest\n*.spec\n\n# Installer logs\npip-log.txt\npip-delete-this-directory.txt\n\n# Unit test / coverage reports\nhtmlcov/\n.tox/\n.coverage\n.coverage.*\n.cache\nnosetests.xml\ncoverage.xml\n*.cover\n.hypothesis/\n.pytest_cache/\n\n# Translations\n*.mo\n*.pot\n\n# Django stuff:\n*.log\nlocal_settings.py\ndb.sqlite3\n\n# Flask stuff:\ninstance/\n.webassets-cache\n\n# Scrapy stuff:\n.scrapy\n\n# Sphinx documentation\ndocs/_build/\n\n# PyBuilder\ntarget/\n\n# Jupyter Notebook\n.ipynb_checkpoints\n\n# pyenv\n.python-version\n\n# celery beat schedule file\ncelerybeat-schedule\n\n# SageMath parsed files\n*.sage.py\n\n# Environments\n.env\n.venv\nenv/\nvenv/\nENV/\nenv.bak/\nvenv.bak/\n\n# Spyder project settings\n.spyderproject\n.spyproject\n\n# Rope project settings\n.ropeproject\n\n# mkdocs documentation\n/site\n\n# mypy\n.mypy_cache/\n\n# PyCharm\n.idea/\n"
  },
  {
    "path": ".travis.yml",
    "content": "dist: xenial\nlanguage: python\npython:\n  - \"3.6\"\n# command to install dependencies\ninstall:\n  - pip install -r requirements.txt\n  - wget https://www.dropbox.com/s/pzljfuwzo8hpb18/mnist.zip?dl=0 -O mnist.zip\n  - mkdir ~/.pipeline\n  - mkdir ~/.pipeline/mnist\n  - unzip mnist.zip -d ~/.pipeline/mnist/\n  - free -g\n# command to run tests\nscript:\n  - pytest -vsx\n \n"
  },
  {
    "path": "LICENSE",
    "content": "MIT License\n\nCopyright (c) 2019 Pavel Ostyakov\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "README.md",
    "content": "# Pipeline\r\n\r\n## How to run training\r\n\r\nFirst of all, create a config. You may find some examples of configs in folders mnist_pipeline, cifar_pipeline and imagenet_pipeline.\r\nThen, call:\r\n\r\n`python3 bin/train.py path_to_config`\r\n\r\n\r\nFor example, for reproducing results from Fixup paper just call:\r\n\r\n`python3 bin/train.py cifar_pipeline/configs/resnet110_fixup.py`\r\n"
  },
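  {
    "path": "docs/config_example.md",
    "content": "# Writing a config\n\nEvery config is a plain Python file defining a `Config` class; `bin/train.py` loads it via `pipeline.utils.load_config` and passes it to `run_train`. Below is a minimal sketch, assuming the MNIST defaults from `mnist_pipeline/configs/base.py`; this file and the model in it are illustrative only, not part of the original pipelines (see `mnist_pipeline/configs/simple_cnn.py` for a real config):\n\n```python\n# A hypothetical minimal config: a linear classifier over flattened 28x28 MNIST images.\nfrom mnist_pipeline.configs.base import ConfigMNISTBase\n\nfrom pipeline.models.base import Flatten\n\nimport torch.nn as nn\n\n\nclass Config(ConfigMNISTBase):\n    def __init__(self, model_save_path=\"models/example_linear\"):\n        model = nn.Sequential(\n            Flatten(),  # (B, 1, 28, 28) -> (B, 784)\n            nn.Linear(28 * 28, 10),  # 10 digit classes\n        )\n\n        super().__init__(model=model, model_save_path=model_save_path)\n```\n\nTrain it with `python3 bin/train.py path/to/this_config.py`. Prediction configs follow the same pattern with a `PredictConfig` class (see `mnist_pipeline/configs/simple_cnn.py`), consumed by `bin/predict.py`.\n"
  },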
  {
    "path": "bin/predict.py",
    "content": "from pipeline.utils import load_predict_config, run_predict\n\nimport argparse\n\n\ndef main():\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"config_path\")\n    args = parser.parse_args()\n\n    config = load_predict_config(args.config_path)\n    run_predict(config)\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "bin/train.py",
    "content": "from pipeline.utils import load_config, run_train\n\nimport argparse\n\n\ndef main():\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"config_path\")\n    args = parser.parse_args()\n\n    config = load_config(args.config_path)\n    run_train(config)\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "cifar_pipeline/__init__.py",
    "content": ""
  },
  {
    "path": "cifar_pipeline/configs/__init__.py",
    "content": ""
  },
  {
    "path": "cifar_pipeline/configs/base.py",
    "content": "import torch.nn as nn\nimport torch.optim as optim\nfrom torchvision.transforms import ToTensor\n\nfrom cifar_pipeline.dataset import CIFARImagesDataset, CIFARTargetsDataset\nfrom pipeline.config_base import ConfigBase\nfrom pipeline.datasets.base import DatasetWithPostprocessingFunc, DatasetComposer, OneHotTargetsDataset\nfrom pipeline.datasets.mixup import MixUpDatasetWrapper\nfrom pipeline.losses.vector_cross_entropy import VectorCrossEntropy\nfrom pipeline.metrics.accuracy import MetricsCalculatorAccuracy\nfrom pipeline.schedulers.learning_rate.reduce_on_plateau import SchedulerWrapperLossOnPlateau\nfrom pipeline.trainers.classification import TrainerClassification\n\nTRAIN_DATASET_PATH = \"~/.pipeline/cifar/train\"\nTEST_DATASET_PATH = \"~/.pipeline/cifar/test\"\n\n\ndef get_dataset(path, transforms, train, use_mixup):\n    images_dataset = DatasetWithPostprocessingFunc(\n        CIFARImagesDataset(path=path, train=train, download=True),\n        transforms)\n\n    targets_dataset = CIFARTargetsDataset(path=path, train=train)\n    if use_mixup:\n        targets_dataset = OneHotTargetsDataset(targets_dataset, 10)\n\n    return DatasetComposer([images_dataset, targets_dataset])\n\n\nclass ConfigCIFARBase(ConfigBase):\n    def __init__(self, model, model_save_path, num_workers=8, batch_size=128, transforms=None,\n                 epoch_count=200, print_frequency=10, mixup_alpha=0):\n        optimizer = optim.SGD(\n            model.parameters(),\n            lr=0.1,\n            momentum=0.9,\n            weight_decay=5e-4)\n\n        scheduler = SchedulerWrapperLossOnPlateau(optimizer)\n        loss = nn.CrossEntropyLoss()\n        metrics_calculator = MetricsCalculatorAccuracy()\n        trainer_cls = TrainerClassification\n\n        if transforms is None:\n            transforms = ToTensor()\n\n        train_dataset = get_dataset(path=TRAIN_DATASET_PATH, transforms=transforms, train=True,\n                                    use_mixup=mixup_alpha > 0)\n        val_dataset = get_dataset(path=TEST_DATASET_PATH, transforms=transforms, train=False,\n                                  use_mixup=mixup_alpha > 0)\n\n        if mixup_alpha > 0:\n            train_dataset = MixUpDatasetWrapper(train_dataset, alpha=mixup_alpha)\n            loss = VectorCrossEntropy()\n\n        super().__init__(\n            model=model,\n            model_save_path=model_save_path,\n            optimizer=optimizer,\n            scheduler=scheduler,\n            loss=loss,\n            metrics_calculator=metrics_calculator,\n            batch_size=batch_size,\n            num_workers=num_workers,\n            train_dataset=train_dataset,\n            val_dataset=val_dataset,\n            trainer_cls=trainer_cls,\n            print_frequency=print_frequency,\n            epoch_count=epoch_count,\n            device=\"cpu\")\n"
  },
  {
    "path": "cifar_pipeline/configs/fixup/base.py",
    "content": "from cifar_pipeline.dataset import CIFARImagesDataset, CIFARTargetsDataset\n\nfrom pipeline.config_base import ConfigBase\nfrom pipeline.schedulers.learning_rate.reduce_on_plateau import SchedulerWrapperLossOnPlateau\nfrom pipeline.metrics.accuracy import MetricsCalculatorAccuracy\nfrom pipeline.datasets.base import DatasetWithPostprocessingFunc, DatasetComposer, OneHotTargetsDataset\nfrom pipeline.trainers.classification import TrainerClassification\nfrom pipeline.datasets.mixup import MixUpDatasetWrapper\nfrom pipeline.losses.vector_cross_entropy import VectorCrossEntropy\n\nimport torch.nn as nn\nimport torch.optim as optim\n\nfrom torchvision.transforms import ToTensor, Compose, Normalize\n\n\nTRAIN_DATASET_PATH = \"~/.pipeline/cifar/train\"\nTEST_DATASET_PATH = \"~/.pipeline/cifar/test\"\n\n\ndef get_dataset(path, transforms, train, use_mixup):\n    images_dataset = DatasetWithPostprocessingFunc(\n        CIFARImagesDataset(path=path, train=train, download=True),\n        transforms)\n\n    targets_dataset = CIFARTargetsDataset(path=path, train=train)\n    if use_mixup:\n        targets_dataset = OneHotTargetsDataset(targets_dataset, 10)\n\n    return DatasetComposer([images_dataset, targets_dataset])\n\n\nclass ConfigCIFARBase(ConfigBase):\n    def __init__(self, model, model_save_path, num_workers=8, batch_size=128, transforms=None,\n                 epoch_count=200, print_frequency=10, use_mixup=False):\n        parameters_bias = [p[1] for p in model.named_parameters() if 'bias' in p[0]]\n        parameters_scale = [p[1] for p in model.named_parameters() if 'scale' in p[0]]\n        parameters_others = [p[1] for p in model.named_parameters() if not ('bias' in p[0] or 'scale' in p[0])]\n\n        optimizer = optim.SGD(\n                    [{'params': parameters_bias, 'lr': 0.1/10.},\n                             {'params': parameters_scale, 'lr': 0.1/10.},\n                             {'params': parameters_others}],\n                    lr=0.1,\n                    momentum=0.9,\n                    weight_decay=5e-4)\n\n        scheduler = SchedulerWrapperLossOnPlateau(optimizer)\n        loss = nn.CrossEntropyLoss()\n        metrics_calculator = MetricsCalculatorAccuracy()\n        trainer_cls = TrainerClassification\n\n        if transforms is None:\n            transforms = Compose([ToTensor(), Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])\n\n        train_dataset = get_dataset(path=TRAIN_DATASET_PATH, transforms=transforms, train=True, use_mixup=use_mixup)\n        val_dataset = get_dataset(path=TEST_DATASET_PATH, transforms=transforms, train=False, use_mixup=use_mixup)\n\n\n        if use_mixup:\n            train_dataset = MixUpDatasetWrapper(train_dataset, alpha=0.7)\n            loss = VectorCrossEntropy()\n\n        super().__init__(\n            model=model,\n            model_save_path=model_save_path,\n            optimizer=optimizer,\n            scheduler=scheduler,\n            loss=loss,\n            metrics_calculator=metrics_calculator,\n            batch_size=batch_size,\n            num_workers=num_workers,\n            train_dataset=train_dataset,\n            val_dataset=val_dataset,\n            trainer_cls=trainer_cls,\n            print_frequency=print_frequency,\n            epoch_count=epoch_count)\n"
  },
  {
    "path": "cifar_pipeline/configs/fixup/resnet110_bn.py",
    "content": "from .base import ConfigCIFARBase\n\nfrom cifar_pipeline.resnet_cifar import resnet110\n\nfrom torch.nn import DataParallel\n\nMODEL_SAVE_PATH = \"models/cifar_resnet110_bn\"\n\n\nclass Config(ConfigCIFARBase):\n    def __init__(self):\n        model = resnet110(use_fixup=False)\n\n        super().__init__(model=DataParallel(model), model_save_path=MODEL_SAVE_PATH,\n                         epoch_count=100, batch_size=128)\n"
  },
  {
    "path": "cifar_pipeline/configs/fixup/resnet110_fixup.py",
    "content": "from .base import ConfigCIFARBase\n\nfrom cifar_pipeline.resnet_cifar import resnet110\n\nfrom torch.nn import DataParallel\n\nMODEL_SAVE_PATH = \"models/cifar_resnet110_fixup\"\n\n\nclass Config(ConfigCIFARBase):\n    def __init__(self):\n        model = resnet110(use_fixup=True)\n\n        super().__init__(model=DataParallel(model), model_save_path=MODEL_SAVE_PATH,\n                         epoch_count=100, batch_size=128)\n"
  },
  {
    "path": "cifar_pipeline/configs/fixup/resnet110_fixup_0_0_1.py",
    "content": "from .base import ConfigCIFARBase\n\nfrom cifar_pipeline.resnet_cifar import resnet110\n\nfrom torch.nn import DataParallel\n\nMODEL_SAVE_PATH = \"models/cifar_resnet110_fixup_0_0_1\"\n\n\nclass Config(ConfigCIFARBase):\n    def __init__(self):\n        model = resnet110(use_fixup=True, fixup_coeff=0.01)\n\n        super().__init__(model=DataParallel(model), model_save_path=MODEL_SAVE_PATH,\n                         epoch_count=100, batch_size=128)\n"
  },
  {
    "path": "cifar_pipeline/configs/fixup/resnet110_fixup_0_1.py",
    "content": "from .base import ConfigCIFARBase\n\nfrom cifar_pipeline.resnet_cifar import resnet110\n\nfrom torch.nn import DataParallel\n\nMODEL_SAVE_PATH = \"models/cifar_resnet110_fixup_0_1\"\n\n\nclass Config(ConfigCIFARBase):\n    def __init__(self):\n        model = resnet110(use_fixup=True, fixup_coeff=0.1)\n\n        super().__init__(model=DataParallel(model), model_save_path=MODEL_SAVE_PATH,\n                         epoch_count=100, batch_size=128)\n"
  },
  {
    "path": "cifar_pipeline/configs/fixup/resnet110_fixup_mixup.py",
    "content": "from .base import ConfigCIFARBase\n\nfrom cifar_pipeline.resnet_cifar import resnet110\n\nfrom torch.nn import DataParallel\n\nMODEL_SAVE_PATH = \"models/cifar_resnet110_fixup_mixup\"\n\n\nclass Config(ConfigCIFARBase):\n    def __init__(self):\n        model = resnet110(use_fixup=True)\n\n        super().__init__(model=DataParallel(model), model_save_path=MODEL_SAVE_PATH,\n                         epoch_count=100, batch_size=128, use_mixup=True)\n"
  },
  {
    "path": "cifar_pipeline/configs/fixup/wideresnet/__init__.py",
    "content": ""
  },
  {
    "path": "cifar_pipeline/configs/fixup/wideresnet/base.py",
    "content": "from ..base import ConfigCIFARBase\n\nfrom pipeline.models.image_models.wide_resnet_fixup import WideResNet as WideResNetFixup\nfrom pipeline.models.image_models.wide_resnet import WideResNet as WideResNetBatchNorm\n\nfrom enum import auto\nfrom torch.nn import DataParallel\n\nMODEL_SAVE_PATH = \"models/cifar_wideresnet_{}_{}_layers\"\n\n\nclass ConfigWideResNetBase(ConfigCIFARBase):\n    BATCH_NORM = auto()\n    FIXUP = auto()\n\n    def __init__(self, num_layers, fixup_coeff=1, normalization_type=BATCH_NORM, batch_size=128):\n        if normalization_type == self.BATCH_NORM:\n            model = WideResNetBatchNorm(depth=num_layers, num_classes=10)\n            norm_type = \"batchnorm\"\n        else:\n            model = WideResNetFixup(depth=num_layers, num_classes=10, fixup_coeff=fixup_coeff)\n            norm_type = \"fixup_coeff_{}\".format(fixup_coeff)\n\n        super().__init__(model=DataParallel(model), model_save_path=MODEL_SAVE_PATH.format(norm_type, num_layers),\n                         epoch_count=1, batch_size=batch_size)\n"
  },
  {
    "path": "cifar_pipeline/configs/fixup/wideresnet/batch_norm/10000_layers.py",
    "content": "from ..base import ConfigWideResNetBase\n\n\nclass Config(ConfigWideResNetBase):\n    def __init__(self):\n        super().__init__(num_layers=10000, normalization_type=ConfigWideResNetBase.BATCH_NORM, batch_size=64)\n"
  },
  {
    "path": "cifar_pipeline/configs/fixup/wideresnet/batch_norm/1000_layers.py",
    "content": "from ..base import ConfigWideResNetBase\n\n\nclass Config(ConfigWideResNetBase):\n    def __init__(self):\n        super().__init__(num_layers=1000, normalization_type=ConfigWideResNetBase.BATCH_NORM)\n"
  },
  {
    "path": "cifar_pipeline/configs/fixup/wideresnet/batch_norm/100_layers.py",
    "content": "from ..base import ConfigWideResNetBase\n\n\nclass Config(ConfigWideResNetBase):\n    def __init__(self):\n        super().__init__(num_layers=100, normalization_type=ConfigWideResNetBase.BATCH_NORM)\n"
  },
  {
    "path": "cifar_pipeline/configs/fixup/wideresnet/batch_norm/10_layers.py",
    "content": "from ..base import ConfigWideResNetBase\n\n\nclass Config(ConfigWideResNetBase):\n    def __init__(self):\n        super().__init__(num_layers=10, normalization_type=ConfigWideResNetBase.BATCH_NORM)\n"
  },
  {
    "path": "cifar_pipeline/configs/fixup/wideresnet/batch_norm/__init__.py",
    "content": ""
  },
  {
    "path": "cifar_pipeline/configs/fixup/wideresnet/fixup/10000_layers.py",
    "content": "from ..base import ConfigWideResNetBase\n\n\nclass Config(ConfigWideResNetBase):\n    def __init__(self):\n        super().__init__(num_layers=10000, normalization_type=ConfigWideResNetBase.FIXUP, batch_size=64)\n"
  },
  {
    "path": "cifar_pipeline/configs/fixup/wideresnet/fixup/1000_layers.py",
    "content": "from ..base import ConfigWideResNetBase\n\n\nclass Config(ConfigWideResNetBase):\n    def __init__(self):\n        super().__init__(num_layers=1000, normalization_type=ConfigWideResNetBase.FIXUP)\n"
  },
  {
    "path": "cifar_pipeline/configs/fixup/wideresnet/fixup/100_layers.py",
    "content": "from ..base import ConfigWideResNetBase\n\n\nclass Config(ConfigWideResNetBase):\n    def __init__(self):\n        super().__init__(num_layers=100, normalization_type=ConfigWideResNetBase.FIXUP)\n"
  },
  {
    "path": "cifar_pipeline/configs/fixup/wideresnet/fixup/10_layers.py",
    "content": "from ..base import ConfigWideResNetBase\n\n\nclass Config(ConfigWideResNetBase):\n    def __init__(self):\n        super().__init__(num_layers=10, normalization_type=ConfigWideResNetBase.FIXUP)\n"
  },
  {
    "path": "cifar_pipeline/configs/fixup/wideresnet/fixup/__init__.py",
    "content": ""
  },
  {
    "path": "cifar_pipeline/configs/fixup/wideresnet/fixup_0/10000_layers.py",
    "content": "from ..base import ConfigWideResNetBase\n\n\nclass Config(ConfigWideResNetBase):\n    def __init__(self):\n        super().__init__(num_layers=10000, fixup_coeff=0, normalization_type=ConfigWideResNetBase.FIXUP, batch_size=64)\n"
  },
  {
    "path": "cifar_pipeline/configs/fixup/wideresnet/fixup_0/1000_layers.py",
    "content": "from ..base import ConfigWideResNetBase\n\n\nclass Config(ConfigWideResNetBase):\n    def __init__(self):\n        super().__init__(num_layers=1000, fixup_coeff=0, normalization_type=ConfigWideResNetBase.FIXUP)\n"
  },
  {
    "path": "cifar_pipeline/configs/fixup/wideresnet/fixup_0/100_layers.py",
    "content": "from ..base import ConfigWideResNetBase\n\n\nclass Config(ConfigWideResNetBase):\n    def __init__(self):\n        super().__init__(num_layers=100, fixup_coeff=0, normalization_type=ConfigWideResNetBase.FIXUP)\n"
  },
  {
    "path": "cifar_pipeline/configs/fixup/wideresnet/fixup_0/10_layers.py",
    "content": "from ..base import ConfigWideResNetBase\n\n\nclass Config(ConfigWideResNetBase):\n    def __init__(self):\n        super().__init__(num_layers=10, fixup_coeff=0, normalization_type=ConfigWideResNetBase.FIXUP)\n"
  },
  {
    "path": "cifar_pipeline/configs/fixup/wideresnet/fixup_0/__init__.py",
    "content": ""
  },
  {
    "path": "cifar_pipeline/configs/fixup/wideresnet/fixup_0_0_1/10000_layers.py",
    "content": "from ..base import ConfigWideResNetBase\n\n\nclass Config(ConfigWideResNetBase):\n    def __init__(self):\n        super().__init__(num_layers=10000, fixup_coeff=0.01, normalization_type=ConfigWideResNetBase.FIXUP, batch_size=64)\n"
  },
  {
    "path": "cifar_pipeline/configs/fixup/wideresnet/fixup_0_0_1/1000_layers.py",
    "content": "from ..base import ConfigWideResNetBase\n\n\nclass Config(ConfigWideResNetBase):\n    def __init__(self):\n        super().__init__(num_layers=1000, fixup_coeff=0.01, normalization_type=ConfigWideResNetBase.FIXUP)\n"
  },
  {
    "path": "cifar_pipeline/configs/fixup/wideresnet/fixup_0_0_1/100_layers.py",
    "content": "from ..base import ConfigWideResNetBase\n\n\nclass Config(ConfigWideResNetBase):\n    def __init__(self):\n        super().__init__(num_layers=100, fixup_coeff=0.01, normalization_type=ConfigWideResNetBase.FIXUP)\n"
  },
  {
    "path": "cifar_pipeline/configs/fixup/wideresnet/fixup_0_0_1/10_layers.py",
    "content": "from ..base import ConfigWideResNetBase\n\n\nclass Config(ConfigWideResNetBase):\n    def __init__(self):\n        super().__init__(num_layers=10, fixup_coeff=0.01, normalization_type=ConfigWideResNetBase.FIXUP)\n"
  },
  {
    "path": "cifar_pipeline/configs/fixup/wideresnet/fixup_0_0_1/__init__.py",
    "content": ""
  },
  {
    "path": "cifar_pipeline/configs/fixup/wideresnet/fixup_0_1/10000_layers.py",
    "content": "from ..base import ConfigWideResNetBase\n\n\nclass Config(ConfigWideResNetBase):\n    def __init__(self):\n        super().__init__(num_layers=10000, fixup_coeff=0.1, normalization_type=ConfigWideResNetBase.FIXUP, batch_size=64)\n"
  },
  {
    "path": "cifar_pipeline/configs/fixup/wideresnet/fixup_0_1/1000_layers.py",
    "content": "from ..base import ConfigWideResNetBase\n\n\nclass Config(ConfigWideResNetBase):\n    def __init__(self):\n        super().__init__(num_layers=1000, fixup_coeff=0.1, normalization_type=ConfigWideResNetBase.FIXUP)\n"
  },
  {
    "path": "cifar_pipeline/configs/fixup/wideresnet/fixup_0_1/100_layers.py",
    "content": "from ..base import ConfigWideResNetBase\n\n\nclass Config(ConfigWideResNetBase):\n    def __init__(self):\n        super().__init__(num_layers=100, fixup_coeff=0.1, normalization_type=ConfigWideResNetBase.FIXUP)\n"
  },
  {
    "path": "cifar_pipeline/configs/fixup/wideresnet/fixup_0_1/10_layers.py",
    "content": "from ..base import ConfigWideResNetBase\n\n\nclass Config(ConfigWideResNetBase):\n    def __init__(self):\n        super().__init__(num_layers=10, fixup_coeff=0.1, normalization_type=ConfigWideResNetBase.FIXUP)\n"
  },
  {
    "path": "cifar_pipeline/configs/fixup/wideresnet/fixup_0_1/__init__.py",
    "content": ""
  },
  {
    "path": "cifar_pipeline/configs/fixup/wideresnet/fixup_10/10000_layers.py",
    "content": "from ..base import ConfigWideResNetBase\n\n\nclass Config(ConfigWideResNetBase):\n    def __init__(self):\n        super().__init__(num_layers=10000, fixup_coeff=10, normalization_type=ConfigWideResNetBase.FIXUP, batch_size=64)\n"
  },
  {
    "path": "cifar_pipeline/configs/fixup/wideresnet/fixup_10/1000_layers.py",
    "content": "from ..base import ConfigWideResNetBase\n\n\nclass Config(ConfigWideResNetBase):\n    def __init__(self):\n        super().__init__(num_layers=1000, fixup_coeff=10, normalization_type=ConfigWideResNetBase.FIXUP)\n"
  },
  {
    "path": "cifar_pipeline/configs/fixup/wideresnet/fixup_10/100_layers.py",
    "content": "from ..base import ConfigWideResNetBase\n\n\nclass Config(ConfigWideResNetBase):\n    def __init__(self):\n        super().__init__(num_layers=100, fixup_coeff=10, normalization_type=ConfigWideResNetBase.FIXUP)\n"
  },
  {
    "path": "cifar_pipeline/configs/fixup/wideresnet/fixup_10/10_layers.py",
    "content": "from ..base import ConfigWideResNetBase\n\n\nclass Config(ConfigWideResNetBase):\n    def __init__(self):\n        super().__init__(num_layers=10, fixup_coeff=10, normalization_type=ConfigWideResNetBase.FIXUP)\n"
  },
  {
    "path": "cifar_pipeline/configs/fixup/wideresnet/fixup_10/__init__.py",
    "content": ""
  },
  {
    "path": "cifar_pipeline/configs/simple_cnn.py",
    "content": "import random\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torchvision.transforms import ToTensor\n\nfrom pipeline.models.base import Flatten\nfrom .base import ConfigCIFARBase\n\nMODEL_SAVE_PATH = \"models/cifar_simple_cnn\"\nBATCH_SIZE = 128\n\nSEED = 85\nrandom.seed(SEED)\nnp.random.seed(SEED)\ntorch.random.manual_seed(SEED)\n\n\ndef get_model():\n    model = nn.Sequential(\n        nn.Conv2d(3, 16, kernel_size=3, padding=1),\n        nn.ReLU(),\n        nn.MaxPool2d(kernel_size=2),\n        nn.Conv2d(16, 64, kernel_size=3, padding=1),\n        nn.ReLU(),\n        nn.MaxPool2d(kernel_size=2),\n        nn.Conv2d(64, 128, kernel_size=3, padding=1),\n        nn.ReLU(),\n        nn.Conv2d(128, 128, kernel_size=3, padding=1),\n        nn.ReLU(),\n        nn.AdaptiveAvgPool2d(1),\n        Flatten(),\n        nn.Linear(128, 10)\n    )\n    return model\n\n\nclass Config(ConfigCIFARBase):\n    def __init__(self):\n        model = get_model()\n        transforms = ToTensor()\n        super().__init__(model=model, model_save_path=MODEL_SAVE_PATH,\n                         epoch_count=2, batch_size=BATCH_SIZE, transforms=transforms)\n"
  },
  {
    "path": "cifar_pipeline/dataset.py",
    "content": "import torch.utils.data as data\nfrom torchvision.datasets.cifar import CIFAR10\n\n\nclass CIFARDataset(data.Dataset):\n    def __init__(self, path, download=True, train=True):\n        self._dataset = CIFAR10(path, download=download, train=train)\n\n    def get_image(self, item):\n        return self._dataset[item][0]\n\n    def get_class(self, item):\n        return self._dataset[item][1]\n\n    def __len__(self):\n        return len(self._dataset)\n\n    def __getitem__(self, item):\n        return self._dataset[item]\n\n\nclass CIFARImagesDataset(CIFARDataset):\n    def __getitem__(self, item):\n        return self.get_image(item)\n\n\nclass CIFARTargetsDataset(CIFARDataset):\n    def __getitem__(self, item):\n        return self.get_class(item)\n"
  },
  {
    "path": "cifar_pipeline/resnet_cifar.py",
    "content": "import torch.nn as nn\nimport torch.nn.functional as F\nimport torch.nn.init as init\nimport torch\nimport math\n\n\ndef _weights_init(m):\n    if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):\n        init.kaiming_normal(m.weight)\n\n\nclass LambdaLayer(nn.Module):\n    def __init__(self, lambd):\n        super(LambdaLayer, self).__init__()\n        self.lambd = lambd\n\n    def forward(self, x):\n        return self.lambd(x)\n\n\nclass BasicBlock(nn.Module):\n    expansion = 1\n    m = 2\n\n    def __init__(self, in_planes, planes, stride=1, use_fixup=False, fixup_l=1, fixup_coeff=1):\n        super(BasicBlock, self).__init__()\n        self._use_fixup = use_fixup\n        self._fixup_l = fixup_l\n        self._fixup_coeff = fixup_coeff\n\n        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n        self.bn1 = nn.BatchNorm2d(planes)\n        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)\n        self.bn2 = nn.BatchNorm2d(planes)\n\n        self.shortcut = nn.Sequential()\n        if stride != 1 or in_planes != planes:\n            self.shortcut = LambdaLayer(lambda x:\n                                        F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, planes//4, planes//4), \"constant\", 0))\n\n        if use_fixup:\n            self.scale = nn.Parameter(torch.ones(1))\n            self.biases = nn.ParameterList([nn.Parameter(torch.zeros(1)) for _ in range(4)])\n\n            k = self.conv1.kernel_size[0] * self.conv1.kernel_size[1] * self.conv1.out_channels\n            self.conv1.weight.data.normal_(0, fixup_coeff * fixup_l ** (-1 / (2 * self.m - 2)) * math.sqrt(2. / k))\n            self.conv2.weight.data.zero_()\n\n    def forward(self, x):\n        if self._use_fixup:\n            out = F.relu(self.conv1(x + self.biases[0]) + self.biases[1])\n            out = self.scale * self.conv2(out + self.biases[2]) + self.biases[3]\n        else:\n            out = F.relu(self.bn1(self.conv1(x)))\n            out = self.bn2(self.conv2(out))\n        out += self.shortcut(x)\n        out = F.relu(out)\n        return out\n\n\nclass ResNet(nn.Module):\n    def __init__(self, block, num_blocks, num_classes=10, use_fixup=False, fixup_coeff=1):\n        super(ResNet, self).__init__()\n        self.in_planes = 16\n\n        fixup_l = sum(num_blocks)\n\n        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)\n        self.bn1 = nn.BatchNorm2d(16) if not use_fixup else nn.Sequential()\n        self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1,\n                                       use_fixup=use_fixup, fixup_l=fixup_l, fixup_coeff=fixup_coeff)\n        self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2,\n                                       use_fixup=use_fixup, fixup_l=fixup_l, fixup_coeff=fixup_coeff)\n        self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2,\n                                       use_fixup=use_fixup, fixup_l=fixup_l, fixup_coeff=fixup_coeff)\n        self.linear = nn.Linear(64, num_classes)\n\n        self.bias1 = nn.Parameter(torch.zeros(1))\n        self.bias2 = nn.Parameter(torch.zeros(1))\n        if not use_fixup:\n            self.apply(_weights_init)\n        else:\n            self.linear.weight.data.zero_()\n            self.linear.bias.data.zero_()\n\n            k = self.conv1.kernel_size[0] * self.conv1.kernel_size[1] * self.conv1.out_channels\n            
self.conv1.weight.data.normal_(0, math.sqrt(2. / k))\n\n    def _make_layer(self, block, planes, num_blocks, stride, use_fixup, fixup_l, fixup_coeff):\n        strides = [stride] + [1]*(num_blocks-1)\n        layers = []\n        for stride in strides:\n            layers.append(block(self.in_planes, planes, stride, use_fixup, fixup_l, fixup_coeff))\n            self.in_planes = planes * block.expansion\n\n        return nn.Sequential(*layers)\n\n    def forward(self, x):\n        out = F.relu(self.bn1(self.conv1(x)) + self.bias1)\n        out = self.layer1(out)\n        out = self.layer2(out)\n        out = self.layer3(out)\n        out = F.avg_pool2d(out, out.size()[3])\n        out = out.view(out.size(0), -1)\n        out = self.linear(out + self.bias2)\n        return out\n\n\ndef resnet110(use_fixup=False, fixup_coeff=1):\n    return ResNet(BasicBlock, [18, 18, 18], use_fixup=use_fixup, fixup_coeff=fixup_coeff)\n"
  },
  {
    "path": "imagenet_pipeline/__init__.py",
    "content": ""
  },
  {
    "path": "imagenet_pipeline/configs/__init__.py",
    "content": ""
  },
  {
    "path": "imagenet_pipeline/configs/base.py",
    "content": "from imagenet_pipeline.dataset import ImageNetImagesDataset, ImageNetTargetsDataset\n\nfrom pipeline.config_base import ConfigBase\nfrom pipeline.schedulers.learning_rate.reduce_on_plateau import SchedulerWrapperLossOnPlateau\nfrom pipeline.metrics.accuracy import MetricsCalculatorAccuracy\nfrom pipeline.datasets.base import DatasetWithPostprocessingFunc, DatasetComposer, OneHotTargetsDataset\nfrom pipeline.trainers.classification import TrainerClassification\n\nfrom pipeline.datasets.mixup import MixUpDatasetWrapper\nfrom pipeline.losses.vector_cross_entropy import VectorCrossEntropy\nimport torch.nn as nn\nimport torch.optim as optim\n\nfrom torchvision.transforms import ToTensor, Compose, Normalize\n\n\nTRAIN_DATASET_PATH = \"~/train\"\nTEST_DATASET_PATH = \"~/val\"\n\n\ndef get_dataset(path, transforms, use_mixup):\n    images_dataset = DatasetWithPostprocessingFunc(\n        ImageNetImagesDataset(path=path),\n        transforms)\n\n    targets_dataset = ImageNetTargetsDataset(path=path)\n\n    if use_mixup:\n        targets_dataset = OneHotTargetsDataset(targets_dataset, 1000)\n    return DatasetComposer([images_dataset, targets_dataset])\n\n\nclass ConfigImageNetBase(ConfigBase):\n    def __init__(self, model, model_save_path, num_workers=16, batch_size=128, learning_rate=0.1, transforms=None, use_mixup=False):\n        parameters_bias = [p[1] for p in model.named_parameters() if 'bias' in p[0]]\n        parameters_scale = [p[1] for p in model.named_parameters() if 'scale' in p[0]]\n        parameters_others = [p[1] for p in model.named_parameters() if not ('bias' in p[0] or 'scale' in p[0])]\n\n        optimizer = optim.SGD(\n                    [{'params': parameters_bias, 'lr': learning_rate/10.},\n                             {'params': parameters_scale, 'lr': learning_rate/10.},\n                             {'params': parameters_others}],\n                    lr=learning_rate,\n                    momentum=0.9,\n                    weight_decay=5e-4)\n        scheduler = SchedulerWrapperLossOnPlateau(optimizer)\n        loss = nn.CrossEntropyLoss()\n        metrics_calculator = MetricsCalculatorAccuracy()\n        trainer_cls = TrainerClassification\n\n        if transforms is None:\n            transforms = Compose([ToTensor(), Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])\n\n        train_dataset = get_dataset(path=TRAIN_DATASET_PATH, transforms=transforms, use_mixup=use_mixup)\n        val_dataset = get_dataset(path=TEST_DATASET_PATH, transforms=transforms, use_mixup=use_mixup)\n        \n        if use_mixup:\n            train_dataset = MixUpDatasetWrapper(train_dataset, alpha=0.7)\n            loss = VectorCrossEntropy()\n\n        super().__init__(\n            model=model,\n            model_save_path=model_save_path,\n            optimizer=optimizer,\n            scheduler=scheduler,\n            loss=loss,\n            metrics_calculator=metrics_calculator,\n            batch_size=batch_size,\n            num_workers=num_workers,\n            train_dataset=train_dataset,\n            val_dataset=val_dataset,\n            trainer_cls=trainer_cls,\n            print_frequency=100)\n"
  },
  {
    "path": "imagenet_pipeline/configs/resnet101_fixup.py",
    "content": "from .base import ConfigImageNetBase\n\nfrom torch.nn import DataParallel\n\n\nfrom pipeline.models.image_models.resnet_fixup import resnet101\n\nMODEL_SAVE_PATH = \"models/imagenet_resnet_101_fixup\"\n\n\nclass Config(ConfigImageNetBase):\n    def __init__(self, model_save_path=MODEL_SAVE_PATH):\n        super().__init__(model=DataParallel(resnet101()), model_save_path=model_save_path, use_mixup=True, batch_size=128 * 8, learning_rate=0.1 * 8)\n"
  },
  {
    "path": "imagenet_pipeline/configs/resnet101_fixup_128.py",
    "content": "from .base import ConfigImageNetBase\n\nfrom torch.nn import DataParallel\n\n\nfrom pipeline.models.image_models.resnet_fixup import resnet101\n\nMODEL_SAVE_PATH = \"models/imagenet_resnet_101_fixup_128\"\n\n\nclass Config(ConfigImageNetBase):\n    def __init__(self, model_save_path=MODEL_SAVE_PATH):\n        super().__init__(model=DataParallel(resnet101()), model_save_path=model_save_path, use_mixup=True, batch_size=128, learning_rate=0.1)\n"
  },
  {
    "path": "imagenet_pipeline/configs/resnet50.py",
    "content": "from .base import ConfigImageNetBase\n\nfrom torch.nn import DataParallel\n\nfrom torchvision.models import resnet50\n\nMODEL_SAVE_PATH = \"models/imagenet_resnet_50\"\n\n\nclass Config(ConfigImageNetBase):\n    def __init__(self, model_save_path=MODEL_SAVE_PATH):\n        super().__init__(model=DataParallel(resnet50()), model_save_path=model_save_path)\n"
  },
  {
    "path": "imagenet_pipeline/configs/resnet50_fixup.py",
    "content": "from .base import ConfigImageNetBase\n\nfrom torch.nn import DataParallel\n\n\nfrom pipeline.models.image_models.resnet_fixup import resnet50\n\nMODEL_SAVE_PATH = \"models/imagenet_resnet_50_fixup\"\n\n\nclass Config(ConfigImageNetBase):\n    def __init__(self, model_save_path=MODEL_SAVE_PATH):\n        super().__init__(model=DataParallel(resnet50()), model_save_path=model_save_path, use_mixup=True, batch_size=128 * 7, learning_rate=0.1 * 7)\n"
  },
  {
    "path": "imagenet_pipeline/configs/resnet50_fixup_128.py",
    "content": "from .base import ConfigImageNetBase\n\nfrom torch.nn import DataParallel\n\n\nfrom pipeline.models.image_models.resnet_fixup import resnet50\n\nMODEL_SAVE_PATH = \"models/imagenet_resnet_50_fixup_128\"\n\n\nclass Config(ConfigImageNetBase):\n    def __init__(self, model_save_path=MODEL_SAVE_PATH):\n        super().__init__(model=DataParallel(resnet50()), model_save_path=model_save_path, use_mixup=True, batch_size=128, learning_rate=0.1)\n"
  },
  {
    "path": "imagenet_pipeline/dataset.py",
    "content": "from pipeline.core import PipelineError\nfrom pipeline.utils import get_path\n\nfrom PIL import Image\n\nimport torch.utils.data as data\n\nimport os\nimport glob\n\nIMAGE_SIZE = (224, 224)\n\n\nclass ImageNetDataset(data.Dataset):\n    def __init__(self, path):\n        path = get_path(path)\n        if not os.path.exists(path):\n            raise PipelineError(\"Path {} does not exist\".format(path))\n\n        self._paths = sorted(glob.glob(os.path.join(path, \"*/*.JPEG\")))\n\n        classes = set()\n        for path in self._paths:\n            class_name = os.path.basename(os.path.dirname(path))\n            classes.add(class_name)\n\n        classes = sorted(list(classes))\n        self._class_to_id = dict((class_name, i) for i, class_name in enumerate(classes))\n\n    def get_image(self, item):\n        path = self._paths[item]\n        image = Image.open(path).resize(IMAGE_SIZE).convert(\"RGB\")\n        return image\n\n    def get_class(self, item):\n        path = self._paths[item]\n        class_name = os.path.basename(os.path.dirname(path))\n        result = self._class_to_id[class_name]\n        return result\n\n    def __len__(self):\n        return len(self._paths)\n\n    def __getitem__(self, item):\n        return self.get_image(item), self.get_class(item)\n\n\nclass ImageNetImagesDataset(ImageNetDataset):\n    def __getitem__(self, item):\n        return self.get_image(item)\n\n\nclass ImageNetTargetsDataset(ImageNetDataset):\n    def __getitem__(self, item):\n        return self.get_class(item)\n"
  },
  {
    "path": "mnist_pipeline/__init__.py",
    "content": ""
  },
  {
    "path": "mnist_pipeline/configs/__init__.py",
    "content": ""
  },
  {
    "path": "mnist_pipeline/configs/base.py",
    "content": "from mnist_pipeline.dataset import MNISTImagesDataset, MNISTTargetsDataset\n\nfrom pipeline.config_base import ConfigBase, PredictConfigBase\nfrom pipeline.schedulers.learning_rate.reduce_on_plateau import SchedulerWrapperLossOnPlateau\nfrom pipeline.metrics.accuracy import MetricsCalculatorAccuracy\nfrom pipeline.datasets.base import DatasetWithPostprocessingFunc, DatasetComposer\nfrom pipeline.trainers.classification import TrainerClassification\nfrom pipeline.predictors.classification import PredictorClassification\n\nimport torch.nn as nn\nimport torch.optim as optim\n\nfrom torchvision.transforms import ToTensor\n\n\nTRAIN_DATASET_PATH = \"~/.pipeline/mnist/train.csv\"\nTEST_DATASET_PATH = \"~/.pipeline/mnist/test.csv\"\n\nVAL_RATIO = 0.2\n\n\ndef get_dataset(mode, transforms):\n    images_dataset = DatasetWithPostprocessingFunc(\n        MNISTImagesDataset(path=TRAIN_DATASET_PATH, mode=mode, val_ratio=VAL_RATIO),\n        transforms)\n\n    targets_dataset = MNISTTargetsDataset(\n        path=TRAIN_DATASET_PATH, mode=mode, val_ratio=VAL_RATIO)\n\n    return DatasetComposer([images_dataset, targets_dataset])\n\n\nclass ConfigMNISTBase(ConfigBase):\n    def __init__(self, model, model_save_path, num_workers=4, batch_size=128, transforms=None):\n        optimizer = optim.Adam(model.parameters())\n        scheduler = SchedulerWrapperLossOnPlateau(optimizer)\n        loss = nn.CrossEntropyLoss()\n        metrics_calculator = MetricsCalculatorAccuracy()\n        trainer_cls = TrainerClassification\n\n        if transforms is None:\n            transforms = ToTensor()\n\n        train_dataset = get_dataset(mode=MNISTImagesDataset.MODE_TRAIN, transforms=transforms)\n        val_dataset = get_dataset(mode=MNISTImagesDataset.MODE_VAL, transforms=transforms)\n\n        super().__init__(\n            model=model,\n            model_save_path=model_save_path,\n            optimizer=optimizer,\n            scheduler=scheduler,\n            loss=loss,\n            metrics_calculator=metrics_calculator,\n            batch_size=batch_size,\n            num_workers=num_workers,\n            train_dataset=train_dataset,\n            val_dataset=val_dataset,\n            trainer_cls=trainer_cls)\n\n\nclass PredictConfigMNISTBase(PredictConfigBase):\n    def __init__(self, model, model_save_path, num_workers=4, batch_size=128):\n        predictor_cls = PredictorClassification\n\n        images_dataset = DatasetWithPostprocessingFunc(\n            MNISTImagesDataset(path=TRAIN_DATASET_PATH, mode=MNISTImagesDataset.MODE_VAL, val_ratio=VAL_RATIO),\n            ToTensor())\n\n        dataset = DatasetComposer([images_dataset, list(range(len(images_dataset)))])\n\n        super().__init__(\n            model=model,\n            model_save_path=model_save_path,\n            dataset=dataset,\n            predictor_cls=predictor_cls,\n            num_workers=num_workers,\n            batch_size=batch_size)\n"
  },
  {
    "path": "mnist_pipeline/configs/resnet18.py",
    "content": "from .base import ConfigMNISTBase\n\nfrom pipeline.models.image_models.encoders.resnet import Resnet18FeatureExtractor\n\nimport torch.nn as nn\n\n\nclass Config(ConfigMNISTBase):\n    def __init__(self, model_save_path=\"models/resnet18\"):\n        model = nn.Sequential(\n            Resnet18FeatureExtractor(input_channels=1),\n            nn.Linear(Resnet18FeatureExtractor.NUM_FEATURES, 10)\n        )\n\n        super().__init__(model=model, model_save_path=model_save_path)\n"
  },
  {
    "path": "mnist_pipeline/configs/simple_cnn.py",
    "content": "from .base import ConfigMNISTBase, PredictConfigMNISTBase\n\nfrom pipeline.models.base import Flatten\n\nimport torch.nn as nn\n\n\nMODEL_SAVE_PATH = \"models/simple_cnn\"\n\n\ndef get_model():\n    model = nn.Sequential(\n        nn.Conv2d(1, 16, kernel_size=3, padding=1),\n        nn.ReLU(),\n        nn.MaxPool2d(kernel_size=2),\n        nn.Conv2d(16, 64, kernel_size=3, padding=1),\n        nn.ReLU(),\n        nn.MaxPool2d(kernel_size=2),\n        nn.Conv2d(64, 128, kernel_size=3, padding=1),\n        nn.ReLU(),\n        nn.Conv2d(128, 128, kernel_size=3, padding=1),\n        nn.ReLU(),\n        nn.AdaptiveAvgPool2d(1),\n        Flatten(),\n        nn.Linear(128, 10)\n    )\n    return model\n\n\nclass Config(ConfigMNISTBase):\n    def __init__(self, model_save_path=MODEL_SAVE_PATH):\n        super().__init__(model=get_model(), model_save_path=model_save_path)\n\n\nclass PredictConfig(PredictConfigMNISTBase):\n    def __init__(self, model_save_path=MODEL_SAVE_PATH):\n        super().__init__(model=get_model(), model_save_path=model_save_path)\n"
  },
  {
    "path": "mnist_pipeline/dataset.py",
    "content": "from pipeline.core import PipelineError\nfrom pipeline.utils import get_path\n\nimport torch.utils.data as data\nfrom enum import auto\n\nimport os\nimport pandas as pd\n\n\nclass MNISTDataset(data.Dataset):\n    MODE_TRAIN = auto()\n    MODE_VAL = auto()\n\n    def __init__(self, path, mode, val_ratio):\n        path = get_path(path)\n        if not os.path.exists(path):\n            raise PipelineError(\"Path {} does not exist\".format(path))\n\n        dataset = pd.read_csv(path).values\n        train_length = int(len(dataset) * (1 - val_ratio))\n        if mode == self.MODE_TRAIN:\n            dataset = dataset[:train_length]\n        else:\n            dataset = dataset[train_length:]\n\n        self._dataset = dataset\n\n    def __len__(self):\n        return len(self._dataset)\n\n    def __getitem__(self, item):\n        row = self._dataset[item]\n\n        image = row[1:].reshape(28, 28, 1).astype(\"uint8\")\n        target = int(row[0])\n        return image, target\n\n\nclass MNISTImagesDataset(MNISTDataset):\n    def __init__(self, path, mode, val_ratio):\n        super().__init__(path, mode, val_ratio)\n\n    def __getitem__(self, item):\n        image, _ = super().__getitem__(item)\n        return image\n\n\nclass MNISTTargetsDataset(MNISTDataset):\n    def __init__(self, path, mode, val_ratio):\n        super().__init__(path, mode, val_ratio)\n\n    def __getitem__(self, item):\n        _, target = super().__getitem__(item)\n        return target\n"
  },
  {
    "path": "mnist_pipeline/tests/__init__.py",
    "content": ""
  },
  {
    "path": "mnist_pipeline/tests/test_dataset.py",
    "content": "from mnist_pipeline.dataset import MNISTDataset, MNISTImagesDataset, MNISTTargetsDataset\nfrom mnist_pipeline.configs.base import TRAIN_DATASET_PATH\n\nfrom pipeline.utils import get_path\n\nimport os\n\n\nclass TestMNISTDataset:\n    def setup(self):\n        assert os.path.exists(get_path(TRAIN_DATASET_PATH)), \"You need to download MNIST dataset first\"\n\n    def test_train_dataset(self):\n        dataset = MNISTDataset(TRAIN_DATASET_PATH, mode=MNISTDataset.MODE_TRAIN, val_ratio=0.2)\n        assert len(dataset) == 33600\n\n        _, _ = dataset[33599]\n        image, target = dataset[0]\n\n        assert 0 <= target < 10\n\n        assert image.shape == (28, 28, 1)\n\n    def test_val_dataset(self):\n        dataset = MNISTDataset(TRAIN_DATASET_PATH, mode=MNISTDataset.MODE_VAL, val_ratio=0.2)\n        assert len(dataset) == 8400\n\n        _, _ = dataset[8399]\n        image, target = dataset[0]\n\n        assert 0 <= target < 10\n\n        assert image.shape == (28, 28, 1)\n\n        dataset = MNISTDataset(TRAIN_DATASET_PATH, mode=MNISTDataset.MODE_VAL, val_ratio=0)\n        assert len(dataset) == 0\n\n    def test_images_dataset(self):\n        dataset = MNISTImagesDataset(TRAIN_DATASET_PATH, mode=MNISTDataset.MODE_VAL, val_ratio=1)\n\n        image = dataset[10]\n        assert image.shape == (28, 28, 1)\n\n        assert image.min() >= 0\n        assert 1 <= image.max() <= 255\n\n    def test_targets_dataset(self):\n        dataset = MNISTTargetsDataset(TRAIN_DATASET_PATH, mode=MNISTDataset.MODE_TRAIN, val_ratio=0.5234)\n\n        target = dataset[51]\n\n        assert 0 <= target <= 9\n\n        assert type(target) == int\n"
  },
  {
    "path": "mnist_pipeline/tests/test_train.py",
    "content": "from mnist_pipeline.configs.simple_cnn import Config, PredictConfig\n\nfrom pipeline.utils import run_train, run_predict\nimport tempfile\nimport shutil\nimport os\nimport hashlib\n\n\nclass TestMNISTTrain:\n    def test_mnist_train(self):\n        test_path = tempfile.mkdtemp()\n        config = Config(model_save_path=test_path)\n        config.epoch_count = 2\n        run_train(config)\n\n        assert os.path.exists(os.path.join(test_path, \"log.txt\"))\n        assert os.path.exists(os.path.join(test_path, \"epoch_0\"))\n        assert os.path.exists(os.path.join(test_path, \"epoch_1\"))\n        assert not os.path.exists(os.path.join(test_path, \"epoch_2\"))\n        assert os.path.exists(os.path.join(test_path, \"state\"))\n\n        with open(os.path.join(test_path, \"epoch_1\"), \"rb\") as fin:\n            model_checkpoint_hash = hashlib.md5(fin.read()).hexdigest()\n\n        run_train(config)\n\n        with open(os.path.join(test_path, \"epoch_1\"), \"rb\") as fin:\n            new_model_checkpoint_hash = hashlib.md5(fin.read()).hexdigest()\n\n        assert model_checkpoint_hash == new_model_checkpoint_hash\n        assert not os.path.exists(os.path.join(test_path, \"epoch_2\"))\n\n        predict_config = PredictConfig(model_save_path=test_path)\n        run_predict(predict_config)\n\n        assert os.path.exists(os.path.join(test_path, \"predictions\", \"predictions\"))\n        assert os.path.exists(os.path.join(test_path, \"predictions\", \"identifiers\"))\n\n        shutil.rmtree(test_path)\n"
  },
  {
    "path": "pipeline/__init__.py",
    "content": ""
  },
  {
    "path": "pipeline/config_base.py",
    "content": "from .datasets.base import EmptyDataset\nfrom .metrics.base import MetricsCalculatorEmpty\nfrom pipeline.schedulers.base import SchedulerWrapperIdentity\nfrom .storage.state import StateStorageFile\nfrom .storage.predictions import PredictionsStorageFiles\n\nimport torch\nimport os\n\n\nclass ConfigBase:\n    def __init__(\n            self,\n            model,\n            model_save_path,\n            train_dataset,\n            optimizer,\n            loss,\n            trainer_cls,\n            device=None,\n            val_dataset=None,\n            scheduler=None,\n            metrics_calculator=None,\n            batch_size=1,\n            num_workers=0,\n            epoch_count=None,\n            print_frequency=1,\n            state_storage=None):\n\n        if val_dataset is None:\n            val_dataset = EmptyDataset()\n\n        if scheduler is None:\n            scheduler = SchedulerWrapperIdentity()\n\n        if metrics_calculator is None:\n            metrics_calculator = MetricsCalculatorEmpty()\n\n        if device is None:\n            device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n        if state_storage is None:\n            state_storage = StateStorageFile(os.path.join(model_save_path, \"state\"))\n\n        self.model = model\n        self.model_save_path = model_save_path\n        self.train_dataset = train_dataset\n        self.val_dataset = val_dataset\n        self.batch_size = batch_size\n        self.num_workers = num_workers\n        self.scheduler = scheduler\n        self.metrics_calculator = metrics_calculator\n        self.loss = loss\n        self.optimizer = optimizer\n        self.epoch_count = epoch_count\n        self.print_frequency = print_frequency\n        self.trainer_cls = trainer_cls\n        self.device = device\n        self.state_storage = state_storage\n\n\nclass PredictConfigBase:\n    def __init__(\n            self,\n            model,\n            model_save_path,\n            dataset,\n            predictor_cls,\n            device=None,\n            batch_size=1,\n            num_workers=0,\n            print_frequency=1,\n            predictions_storage=None):\n        if device is None:\n            device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n        if predictions_storage is None:\n            predictions_storage = PredictionsStorageFiles(os.path.join(model_save_path, \"predictions\"))\n\n        self.model = model\n        self.dataset = dataset\n        self.model_save_path = model_save_path\n        self.batch_size = batch_size\n        self.num_workers = num_workers\n        self.print_frequency = print_frequency\n        self.predictor_cls = predictor_cls\n        self.device = device\n        self.predictions_storage = predictions_storage\n"
  },
  {
    "path": "pipeline/core.py",
    "content": "class PipelineError(Exception):\n    pass\n"
  },
  {
    "path": "pipeline/datasets/__init__.py",
    "content": ""
  },
  {
    "path": "pipeline/datasets/base.py",
    "content": "import torch.utils.data as data\nimport torch\n\nfrom typing import Sequence\n\n\nclass EmptyDataset(data.Dataset):\n    def __len__(self):\n        return 0\n\n    def __getitem__(self, item: int):\n        assert False, \"This code is unreachable\"\n\n\nclass DatasetComposer(data.Dataset):\n    def __init__(self, datasets: Sequence):\n        self._datasets = datasets\n        self._dataset_length = len(datasets[0])\n        for dataset in datasets:\n            assert self._dataset_length == len(dataset)\n\n    def __len__(self):\n        return self._dataset_length\n\n    def __getitem__(self, item: int):\n        return tuple(dataset[item] for dataset in self._datasets)\n\n\nclass OneHotTargetsDataset(data.Dataset):\n    def __init__(self, targets: Sequence, class_count: int):\n        self._targets = targets\n        self._class_count = class_count\n\n    def __len__(self):\n        return len(self._targets)\n\n    def __getitem__(self, item: int):\n        target = self._targets[item]\n        result = torch.zeros(self._class_count, dtype=torch.float32)\n        result[target] = 1\n        return result\n\n\nclass MultiLabelTargetsDataset(data.Dataset):\n    def __init__(self, targets: Sequence, class_count: int):\n        self._targets = targets\n        self._class_count = class_count\n\n    def __len__(self):\n        return len(self._targets)\n\n    def __getitem__(self, item: int):\n        target = self._targets[item]\n        result = torch.zeros(self._class_count, dtype=torch.float32)\n\n        for class_id in target:\n            result[class_id] = 1\n\n        return result\n\n\nclass DatasetWithPostprocessingFunc(data.Dataset):\n    def __init__(self, dataset, postprocessing_func):\n        self._dataset = dataset\n        self._postprocessing_func = postprocessing_func\n\n    def __len__(self):\n        return len(self._dataset)\n\n    def __getitem__(self, item):\n        return self._postprocessing_func(self._dataset[item])\n"
  },
  {
    "path": "pipeline/datasets/mixup.py",
    "content": "import torch.utils.data as data\nimport random\nimport numpy as np\n\n\nclass MixUpDatasetWrapper(data.Dataset):\n    def __init__(self, dataset, alpha=1):\n        super().__init__()\n        self._dataset = dataset\n        self._alpha = alpha\n\n    def __len__(self):\n        return len(self._dataset)\n\n    def __getitem__(self, item):\n        first = self._dataset[item]\n        second = random.choice(self._dataset)\n\n        coeff = np.random.beta(self._alpha, self._alpha)\n\n        result = []\n        for elem1, elem2 in zip(first, second):\n            result.append(elem1 * coeff + elem2 * (1 - coeff))\n\n        return tuple(result)\n"
  },
  {
    "path": "pipeline/logger.py",
    "content": "import logging\nimport sys\n\n\nLOGGER = logging.getLogger()\nFORMATTER = logging.Formatter(\"%(asctime)s - %(levelname)s - %(message)s\")\n\n\ndef setup_logger(out_file=None, stderr=True, stderr_level=logging.INFO, file_level=logging.DEBUG):\n    LOGGER.handlers = []\n    LOGGER.setLevel(min(stderr_level, file_level))\n\n    if stderr:\n        handler = logging.StreamHandler(sys.stderr)\n        handler.setFormatter(FORMATTER)\n        handler.setLevel(stderr_level)\n        LOGGER.addHandler(handler)\n\n    if out_file is not None:\n        handler = logging.FileHandler(out_file)\n        handler.setFormatter(FORMATTER)\n        handler.setLevel(file_level)\n        LOGGER.addHandler(handler)\n\n    LOGGER.info(\"logger set up\")\n    return LOGGER\n"
  },
  {
    "path": "pipeline/losses/vector_cross_entropy.py",
    "content": "import torch\nimport torch.nn as nn\n\n\nclass VectorCrossEntropy(nn.Module):\n    def __init__(self):\n        super().__init__()\n        self._log_softmax = nn.LogSoftmax(dim=1)\n\n    def forward(self, input, target):\n        input = self._log_softmax(input)\n        loss = -torch.sum(input * target)\n        loss = loss / input.shape[0]\n        return loss\n\n"
  },
  {
    "path": "pipeline/metrics/__init__.py",
    "content": ""
  },
  {
    "path": "pipeline/metrics/accuracy.py",
    "content": "from .base import MetricsCalculatorBase\nfrom ..core import PipelineError\n\nfrom sklearn.metrics import accuracy_score\n\nimport numpy as np\n\n\nclass MetricsCalculatorAccuracy(MetricsCalculatorBase):\n    def __init__(self, border=0.5):\n        super().__init__()\n        self.zero_cache()\n        self._border = border\n\n    def zero_cache(self):\n        self._predictions = []\n        self._true_labels = []\n\n    def add(self, y_predicted, y_true):\n        self._predictions.append(y_predicted.cpu().data.numpy())\n        self._true_labels.append(y_true.cpu().data.numpy())\n\n    def calculate(self):\n        if not self._predictions:\n            raise PipelineError(\"You need to add predictions for calculating the accuracy first\")\n\n        y_pred = np.concatenate(self._predictions)\n        y_true = np.concatenate(self._true_labels)\n\n        if y_pred.shape[-1] == 1:\n            # Binary classification\n            y_pred = (y_pred >= self._border).astype(\"int\")\n        else:\n            y_pred = np.argmax(y_pred, -1)\n\n        if len(y_true.shape) != 1:\n            y_true = np.argmax(y_true, -1)\n\n        result = accuracy_score(y_true, y_pred)\n        return {\"accuracy\": result}\n"
  },
  {
    "path": "pipeline/metrics/base.py",
    "content": "import abc\n\n\nclass MetricsCalculatorBase(abc.ABC):\n    @abc.abstractmethod\n    def zero_cache(self):\n        pass\n\n    @abc.abstractmethod\n    def add(self, y_predicted, y_true):\n        pass\n\n    @abc.abstractmethod\n    def calculate(self):\n        pass\n\n\nclass MetricsCalculatorEmpty(MetricsCalculatorBase):\n    def zero_cache(self):\n        pass\n\n    def add(self, y_predicted, y_true):\n        pass\n\n    def calculate(self):\n        return {}\n"
  },
  {
    "path": "pipeline/models/__init__.py",
    "content": ""
  },
  {
    "path": "pipeline/models/base.py",
    "content": "import torch.nn as nn\n\n\nclass Flatten(nn.Module):\n    def forward(self, x):\n        return x.view(x.shape[0], -1)\n"
  },
  {
    "path": "pipeline/models/image_models/__init__.py",
    "content": ""
  },
  {
    "path": "pipeline/models/image_models/encoders/__init__.py",
    "content": ""
  },
  {
    "path": "pipeline/models/image_models/encoders/resnet.py",
    "content": "from torchvision.models import resnet\nimport torch.nn as nn\n\n\nclass ResnetModelFeatureExtractorBase(nn.Module):\n    def __init__(self, model, input_channels):\n        super().__init__()\n\n        model.fc = nn.Sequential()\n        model.avgpool = nn.AdaptiveAvgPool2d(1)\n\n        if input_channels != 3:\n            model.conv1 = nn.Conv2d(\n                input_channels,\n                model.conv1.out_channels,\n                kernel_size=model.conv1.kernel_size,\n                stride=model.conv1.stride,\n                padding=model.conv1.padding,\n                bias=model.conv1.bias)\n\n        self._model = model\n\n    def forward(self, input):\n        return self._model(input)\n\n\nclass Resnet18FeatureExtractor(ResnetModelFeatureExtractorBase):\n    NUM_FEATURES = 512\n\n    def __init__(self, pretrained=True, input_channels=3):\n        model = resnet.resnet18(pretrained=pretrained)\n        super().__init__(\n            model=model,\n            input_channels=input_channels)\n\n\nclass Resnet34FeatureExtractor(ResnetModelFeatureExtractorBase):\n    NUM_FEATURES = 512\n\n    def __init__(self, pretrained=True, input_channels=3):\n        model = resnet.resnet34(pretrained=pretrained)\n        super().__init__(\n            model=model,\n            input_channels=input_channels)\n\n\nclass Resnet50FeatureExtractor(ResnetModelFeatureExtractorBase):\n    NUM_FEATURES = 2048\n\n    def __init__(self, pretrained=True, input_channels=3):\n        model = resnet.resnet50(pretrained=pretrained)\n        super().__init__(\n            model=model,\n            input_channels=input_channels)\n\n\nclass Resnet101FeatureExtractor(ResnetModelFeatureExtractorBase):\n    NUM_FEATURES = 2048\n\n    def __init__(self, pretrained=True, input_channels=3):\n        model = resnet.resnet101(pretrained=pretrained)\n        super().__init__(\n            model=model,\n            input_channels=input_channels)\n\n\nclass Resnet152FeatureExtractor(ResnetModelFeatureExtractorBase):\n    NUM_FEATURES = 2048\n\n    def __init__(self, pretrained=True, input_channels=3):\n        model = resnet.resnet152(pretrained=pretrained)\n        super().__init__(\n            model=model,\n            input_channels=input_channels)\n"
  },
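  {
    "path": "examples/resnet_encoder_sketch.py",
    "content": "\"\"\"A minimal usage sketch for the resnet feature extractors in\npipeline/models/image_models/encoders/resnet.py. This example file is\nillustrative; the path and toy shapes are assumptions.\"\"\"\n\nfrom pipeline.models.image_models.encoders.resnet import Resnet18FeatureExtractor\n\nimport torch\nimport torch.nn as nn\n\n# The encoder replaces the fc layer, so it outputs flat NUM_FEATURES vectors;\n# pretrained=False avoids downloading weights in this sketch\nencoder = Resnet18FeatureExtractor(pretrained=False, input_channels=1)\nmodel = nn.Sequential(encoder, nn.Linear(Resnet18FeatureExtractor.NUM_FEATURES, 10))\n\n# Single-channel input exercises the conv1 replacement branch\nlogits = model(torch.randn(4, 1, 224, 224))\nassert logits.shape == (4, 10)\n"
  },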
  {
    "path": "pipeline/models/image_models/resnet_fixup.py",
    "content": "import torch.nn as nn\nimport math\nimport torch\n\n\nclass Bottleneck(nn.Module):\n    expansion = 4\n    m = 3\n\n    def __init__(self, inplanes, planes, stride=1, downsample=None, fixup_l=1):\n        super(Bottleneck, self).__init__()\n        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n                               padding=1, bias=False)\n        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\n        self.relu = nn.ReLU(inplace=True)\n        self.downsample = downsample\n        self.stride = stride\n\n        self.scale = nn.Parameter(torch.ones(1))\n        self.biases = nn.ParameterList([nn.Parameter(torch.zeros(1)) for _ in range(6)])\n\n        k = self.conv1.kernel_size[0] * self.conv1.kernel_size[1] * self.conv1.out_channels\n        self.conv1.weight.data.normal_(0, fixup_l ** (-1 / (2 * self.m - 2)) * math.sqrt(2. / k))\n\n        k = self.conv2.kernel_size[0] * self.conv2.kernel_size[1] * self.conv2.out_channels\n        self.conv2.weight.data.normal_(0, fixup_l ** (-1 / (2 * self.m - 2)) * math.sqrt(2. / k))\n        self.conv3.weight.data.zero_()\n\n        if downsample is not None:\n            k = self.downsample.kernel_size[0] * self.downsample.kernel_size[1] * self.downsample.out_channels\n            self.downsample.weight.data.normal_(0, math.sqrt(2. / k))\n\n    def forward(self, x):\n        residual = x\n\n        out = self.conv1(x + self.biases[0])\n        out = self.relu(out + self.biases[1])\n\n        out = self.conv2(out + self.biases[2])\n        out = self.relu(out + self.biases[3])\n\n        out = self.scale * self.conv3(out + self.biases[4]) + self.biases[5]\n\n        if self.downsample is not None:\n            residual = self.downsample(x)\n\n        out += residual\n        out = self.relu(out)\n\n        return out\n\n\nclass ResNet(nn.Module):\n\n    def __init__(self, block, layers, num_classes=1000, input_channels=3):\n        self.inplanes = 64\n        super(ResNet, self).__init__()\n        self.conv1 = nn.Conv2d(input_channels, 64, kernel_size=7, stride=2, padding=3,\n                               bias=False)\n        self.relu = nn.ReLU(inplace=True)\n        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n        fixup_l = sum(layers)\n        self.layer1 = self._make_layer(block, 64, layers[0], fixup_l=fixup_l)\n        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, fixup_l=fixup_l)\n        self.layer3 = self._make_layer(block, 256, layers[2], stride=2, fixup_l=fixup_l)\n        self.layer4 = self._make_layer(block, 512, layers[3], stride=2, fixup_l=fixup_l)\n        self.avgpool = nn.AvgPool2d(7, stride=1)\n        self.bias1 = nn.Parameter(torch.zeros(1))\n        self.bias2 = nn.Parameter(torch.zeros(1))\n        self.fc = nn.Linear(512 * block.expansion, num_classes)\n\n        self.fc.weight.data.zero_()\n        self.fc.bias.data.zero_()\n\n        n = self.conv1.kernel_size[0] * self.conv1.kernel_size[1] * self.conv1.out_channels\n        self.conv1.weight.data.normal_(0, math.sqrt(2. 
/ n))\n\n    def _make_layer(self, block, planes, blocks, fixup_l, stride=1):\n        downsample = None\n        if stride != 1 or self.inplanes != planes * block.expansion:\n            downsample = nn.Conv2d(self.inplanes, planes * block.expansion,\n                                   kernel_size=1, stride=stride, bias=True)\n\n        layers = []\n        layers.append(block(self.inplanes, planes, stride, downsample, fixup_l=fixup_l))\n        self.inplanes = planes * block.expansion\n        for i in range(1, blocks):\n            layers.append(block(self.inplanes, planes, fixup_l=fixup_l))\n\n        return nn.Sequential(*layers)\n\n    def forward(self, x):\n        x = self.conv1(x)\n        x = self.relu(x + self.bias1)\n        x = self.maxpool(x)\n\n        x = self.layer1(x)\n        x = self.layer2(x)\n        x = self.layer3(x)\n        x = self.layer4(x)\n\n        x = self.avgpool(x)\n        x = x.view(x.size(0), -1)\n        x = self.fc(x + self.bias2)\n\n        return x\n\n\ndef resnet50(**kwargs):\n    model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n    return model\n\n\ndef resnet101(**kwargs):\n    model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n    return model\n\n\ndef resnet152(**kwargs):\n    model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n    return model\n"
  },
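  {
    "path": "examples/resnet_fixup_sketch.py",
    "content": "\"\"\"A minimal usage sketch for the Fixup ResNet variants in\npipeline/models/image_models/resnet_fixup.py. This example file is\nillustrative; the path and toy shapes are assumptions.\"\"\"\n\nfrom pipeline.models.image_models.resnet_fixup import resnet50\n\nimport torch\n\n# Fixup networks train without batch normalization; the constructors only\n# need the classifier size (224x224 inputs match the fixed 7x7 avgpool)\nmodel = resnet50(num_classes=10)\n\nlogits = model(torch.randn(2, 3, 224, 224))\nassert logits.shape == (2, 10)\n"
  },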
  {
    "path": "pipeline/models/image_models/wide_resnet.py",
    "content": "\"\"\"\nWide ResNet by Sergey Zagoruyko and Nikos Komodakis\nFixup initialization by Hongyi Zhang, Yann N. Dauphin, Tengyu Ma\nBased on code by xternalz and Andy Brock:\nhttps://github.com/xternalz/WideResNet-pytorch\nhttps://github.com/ajbrock/BoilerPlate\n\"\"\"\n\nimport math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass BasicBlock(nn.Module):\n    def __init__(self, in_planes, out_planes, stride, dropout=0.0):\n        super(BasicBlock, self).__init__()\n        self.bn1 = nn.BatchNorm2d(in_planes)\n        self.relu1 = nn.ReLU(inplace=True)\n        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n                               padding=1, bias=False)\n        self.bn2 = nn.BatchNorm2d(out_planes)\n        self.relu2 = nn.ReLU(inplace=True)\n        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,\n                               padding=1, bias=False)\n        self.dropout = dropout\n        self.equalInOut = (in_planes == out_planes)\n        self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,\n                               padding=0, bias=False) or None\n    def forward(self, x):\n        if not self.equalInOut:\n            x = self.relu1(self.bn1(x))\n        else:\n            out = self.relu1(self.bn1(x))\n        out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))\n        if self.dropout > 0:\n            out = F.dropout(out, p=self.dropout, training=self.training)\n        out = self.conv2(out)\n        return torch.add(x if self.equalInOut else self.convShortcut(x), out)\n\n\nclass NetworkBlock(nn.Module):\n    def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropout):\n        super(NetworkBlock, self).__init__()\n        self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropout)\n\n    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropout):\n        layers = []\n\n        for i in range(int(nb_layers)):\n            _in_planes = i == 0 and in_planes or out_planes\n            _stride = i == 0 and stride or 1\n            layers.append(block(_in_planes, out_planes, _stride, dropout=dropout))\n\n        return nn.Sequential(*layers)\n\n    def forward(self, x):\n        return self.layer(x)\n\n\nclass WideResNet(nn.Module):\n    def __init__(self, depth, num_classes, widen_factor=1, dropout=0.0):\n        super(WideResNet, self).__init__()\n\n        nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]\n\n        assert (depth - 4) % 6 == 0, \"You need to change the number of layers\"\n        n = (depth - 4) / 6\n\n        block = BasicBlock\n\n        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1, padding=1, bias=False)\n\n        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropout=dropout)\n        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropout=dropout)\n        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropout=dropout)\n\n        self.bn1 = nn.BatchNorm2d(nChannels[3])\n\n        self.relu = nn.ReLU(inplace=True)\n        self.fc = nn.Linear(nChannels[3], num_classes)\n        self.nChannels = nChannels[3]\n\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n                m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n            elif isinstance(m, nn.BatchNorm2d):\n                m.weight.data.fill_(1)\n                m.bias.data.zero_()\n            elif isinstance(m, nn.Linear):\n                m.bias.data.zero_()\n\n    def forward(self, x):\n        out = self.conv1(x)\n        out = self.block1(out)\n        out = self.block2(out)\n        out = self.block3(out)\n\n        out = self.relu(self.bn1(out))\n        out = F.adaptive_avg_pool2d(out, 1)\n        out = out.view(-1, self.nChannels)\n        return self.fc(out)\n"
  },
  {
    "path": "pipeline/models/image_models/wide_resnet_fixup.py",
    "content": "\"\"\"\nWide ResNet by Sergey Zagoruyko and Nikos Komodakis\nFixup initialization by Hongyi Zhang, Yann N. Dauphin, Tengyu Ma\nBased on code by xternalz and Andy Brock:\nhttps://github.com/xternalz/WideResNet-pytorch\nhttps://github.com/ajbrock/BoilerPlate\n\"\"\"\n\nimport math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass BasicBlock(nn.Module):\n    m = 2\n\n    def __init__(self, in_planes, out_planes, stride, dropout, fixup_l, fixup_coeff):\n        super(BasicBlock, self).__init__()\n\n        self._dropout = dropout\n\n        self.relu = nn.ReLU(inplace=True)\n        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)\n        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=False)\n\n        self.equalInOut = in_planes == out_planes\n        self.conv_res = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, padding=0, bias=False)\n        self.conv_res = not self.equalInOut and self.conv_res or None\n\n        self.scale = nn.Parameter(torch.ones(1))\n        self.biases = nn.ParameterList([nn.Parameter(torch.zeros(1)) for _ in range(4)])\n\n        k = self.conv1.kernel_size[0] * self.conv1.kernel_size[1] * self.conv1.out_channels\n        self.conv1.weight.data.normal_(0, fixup_coeff * fixup_l ** (-1 / (2 * self.m - 2)) * math.sqrt(2. / k))\n        self.conv2.weight.data.zero_()\n\n        if self.conv_res is not None:\n            k = self.conv_res.kernel_size[0] * self.conv_res.kernel_size[1] * self.conv_res.out_channels\n            self.conv_res.weight.data.normal_(0, math.sqrt(2. / k))\n\n    def forward(self, x):\n        x_out = self.relu(x + self.biases[0])\n        out = self.conv1(x_out) + self.biases[1]\n        out = self.relu(out) + self.biases[2]\n        if self._dropout > 0:\n            out = F.dropout(out, p=self._dropout, training=self.training)\n        out = self.scale * self.conv2(out) + self.biases[3]\n\n        if self.equalInOut:\n            return torch.add(x, out)\n\n        return torch.add(self.conv_res(x_out), out)\n\n\nclass NetworkBlock(nn.Module):\n    def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropout, fixup_l, fixup_coeff):\n        super(NetworkBlock, self).__init__()\n        self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropout, fixup_l, fixup_coeff)\n\n    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropout, fixup_l, fixup_coeff):\n        layers = []\n\n        for i in range(int(nb_layers)):\n            _in_planes = i == 0 and in_planes or out_planes\n            _stride = i == 0 and stride or 1\n            layers.append(block(_in_planes, out_planes, _stride, dropout=dropout, fixup_l=fixup_l, fixup_coeff=fixup_coeff))\n\n        return nn.Sequential(*layers)\n\n    def forward(self, x):\n        return self.layer(x)\n\n\nclass WideResNet(nn.Module):\n    def __init__(self, depth, num_classes, widen_factor=1, dropout=0.0, fixup_coeff=1):\n        super(WideResNet, self).__init__()\n\n        nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]\n\n        assert (depth - 4) % 6 == 0, \"You need to change the number of layers\"\n        n = (depth - 4) / 6\n\n        block = BasicBlock\n        fixup_l = n * 3\n\n        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1, padding=1, bias=False)\n\n        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, 
dropout=dropout,\n                                   fixup_l=fixup_l, fixup_coeff=fixup_coeff)\n        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropout=dropout,\n                                   fixup_l=fixup_l, fixup_coeff=fixup_coeff)\n        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropout=dropout,\n                                   fixup_l=fixup_l, fixup_coeff=fixup_coeff)\n\n        self.relu = nn.ReLU(inplace=True)\n        self.fc = nn.Linear(nChannels[3], num_classes)\n        self.nChannels = nChannels[3]\n\n        self.fc.bias.data.zero_()\n        self.fc.weight.data.zero_()\n\n        k = self.conv1.kernel_size[0] * self.conv1.kernel_size[1] * self.conv1.out_channels\n        self.conv1.weight.data.normal_(0, math.sqrt(2. / k))\n\n        self.bias1 = nn.Parameter(torch.zeros(1))\n        self.bias2 = nn.Parameter(torch.zeros(1))\n\n    def forward(self, x):\n        out = self.conv1(x) + self.bias1\n        out = self.block1(out)\n        out = self.block2(out)\n        out = self.block3(out)\n\n        out = self.relu(out)\n        out = F.adaptive_avg_pool2d(out, 1)\n        out = out.view(-1, self.nChannels)\n        return self.fc(out + self.bias2)\n"
  },
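  {
    "path": "examples/wide_resnet_fixup_sketch.py",
    "content": "\"\"\"A minimal usage sketch for the Fixup WideResNet in\npipeline/models/image_models/wide_resnet_fixup.py. This example file is\nillustrative; the depth/widen_factor values are assumptions.\"\"\"\n\nfrom pipeline.models.image_models.wide_resnet_fixup import WideResNet\n\nimport torch\n\n# depth must satisfy (depth - 4) % 6 == 0; WRN-28-2 is a common CIFAR setup\nmodel = WideResNet(depth=28, num_classes=10, widen_factor=2)\n\nlogits = model(torch.randn(2, 3, 32, 32))\nassert logits.shape == (2, 10)\n"
  },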
  {
    "path": "pipeline/predictors/__init__.py",
    "content": ""
  },
  {
    "path": "pipeline/predictors/base.py",
    "content": "import time\nfrom typing import Iterable\n\nimport torch\nimport torch.nn as nn\n\nfrom ..logger import LOGGER\nfrom ..storage.predictions import PredictionsStorageBase\nfrom ..utils import move_to_device, load_model\n\nimport os\n\n\nclass PredictorBase:\n    def __init__(\n            self,\n            model: nn.Module,\n            data_loader: Iterable,\n            print_frequency: None or int,\n            device: str,\n            model_save_path: str,\n            predictions_storage: PredictionsStorageBase) -> None:\n\n        self.model = model.to(device)\n        self.data_loader = data_loader\n        self.print_frequency = print_frequency\n        self.device = device\n        self.model_save_path = model_save_path\n        self.predictions_storage = predictions_storage\n\n    def predict_step(self, input_data: torch.Tensor):\n        input_data = move_to_device(input_data, device=self.device)\n        model_output = self.model(input_data)\n        return model_output\n\n    def log_predict_step(self, step_id: int, predict_time: float):\n        if self.print_frequency is None or step_id % self.print_frequency == 0:\n            LOGGER.info(\"[{} s] Predict step {}\".format(predict_time, step_id))\n            return True\n\n        return False\n\n    def log_predict_completed(self, predict_time: float):\n        LOGGER.info(\"[{} s] Predict is completed\".format(predict_time))\n        return True\n\n    def load_last_model(self):\n        if os.path.exists(self.model_save_path):\n            epochs = filter(lambda file: file.startswith(\"epoch_\"), os.listdir(self.model_save_path))\n            epochs = map(lambda file: int(file[file.find(\"_\") + 1]), epochs)\n            epochs = list(epochs)\n\n            if epochs:\n                last_model_path = os.path.join(self.model_save_path, \"epoch_{}\".format(max(epochs)))\n                load_model(self.model, last_model_path)\n                return\n\n        LOGGER.info(\"Model not found in {}. Starting to train a model from scratch...\".format(self.model_save_path))\n\n    def run(self):\n        self.load_last_model()\n        self.model.eval()\n\n        step_count = 0\n        start_time = time.time()\n\n        with torch.no_grad():\n            for step_id, (input_data, ids) in enumerate(self.data_loader):\n                model_output = self.predict_step(input_data)\n                self.predictions_storage.add_batch(ids, model_output)\n\n                step_count += 1\n                predict_time = time.time() - start_time\n                self.log_predict_step(step_id, predict_time)\n\n        self.predictions_storage.sort_by_id()\n        self.predictions_storage.flush()\n        predict_time = time.time() - start_time\n        self.log_predict_completed(predict_time)\n        return predict_time\n"
  },
  {
    "path": "pipeline/predictors/classification.py",
    "content": "from .base import PredictorBase\nimport torch\n\n\nclass PredictorClassification(PredictorBase):\n    def predict_step(self, input_data: torch.Tensor):\n        result = super().predict_step(input_data)\n        result = torch.softmax(result, dim=-1)\n        return result\n"
  },
  {
    "path": "pipeline/preprocessing/__init__.py",
    "content": ""
  },
  {
    "path": "pipeline/preprocessing/audio_preprocessing/__init__.py",
    "content": ""
  },
  {
    "path": "pipeline/preprocessing/image_preprocessing/__init__.py",
    "content": ""
  },
  {
    "path": "pipeline/preprocessing/text_preprocessing/__init__.py",
    "content": ""
  },
  {
    "path": "pipeline/schedulers/__init__.py",
    "content": ""
  },
  {
    "path": "pipeline/schedulers/base.py",
    "content": "import abc\n\n\nclass SchedulerBase(abc.ABC):\n    @abc.abstractmethod\n    def step(self, loss, metrics, epoch_id):\n        pass\n\n\nclass SchedulerWrapperBase(SchedulerBase):\n    def __init__(self, scheduler):\n        self._scheduler = scheduler\n\n\nclass SchedulerWrapperIdentity(SchedulerWrapperBase):\n    def __init__(self, *args, **kwargs):\n        super().__init__(None)\n\n    def step(self, loss, metrics, epoch_id):\n        pass\n\n\nclass SchedulerWrapperLossBase(SchedulerWrapperBase):\n    def __init__(self, scheduler):\n        super().__init__(scheduler)\n\n    def step(self, loss, metrics, epoch_id):\n        return self._scheduler.step(loss, epoch_id)\n\n\nclass SchedulerWrapperMetricsMeanBase(SchedulerWrapperBase):\n    def __init__(self, scheduler):\n        super().__init__(scheduler)\n\n    def step(self, loss, metrics, epoch_id):\n        values = list(metrics.values())\n        mean_metrics = sum(values) / len(values)\n        return self._scheduler.step(mean_metrics, epoch_id)\n"
  },
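  {
    "path": "examples/scheduler_wrapper_sketch.py",
    "content": "\"\"\"A minimal sketch of a custom scheduler wrapper built on\nSchedulerWrapperBase, keyed on one named metric instead of the metrics mean.\nThis example file is illustrative; the path, class name and metric name are\nassumptions.\"\"\"\n\nfrom pipeline.schedulers.base import SchedulerWrapperBase\n\n\nclass SchedulerWrapperSingleMetric(SchedulerWrapperBase):\n    \"\"\"Forwards a single validation metric to a ReduceLROnPlateau-style scheduler.\"\"\"\n\n    def __init__(self, scheduler, metric_name):\n        super().__init__(scheduler)\n        self._metric_name = metric_name\n\n    def step(self, loss, metrics, epoch_id):\n        # The wrapped scheduler decides on plateaus of this one metric,\n        # e.g. metric_name=\"accuracy\" with MetricsCalculatorAccuracy\n        return self._scheduler.step(metrics[self._metric_name], epoch_id)\n"
  },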
  {
    "path": "pipeline/schedulers/dropout/__init__.py",
    "content": ""
  },
  {
    "path": "pipeline/schedulers/dropout/increase_step.py",
    "content": "from ..base import SchedulerBase\n\nfrom .utils import set_dropout_probability\n\n\nclass SchedulerWrapperIncreaseStep(SchedulerBase):\n    def __init__(self, model, epoch_count, initial_value=0, max_value=0.5):\n        self._model = model\n        self._epoch_count = epoch_count\n        self._initial_value = initial_value\n        self._max_value = max_value\n\n    def step(self, loss, metrics, epoch_id):\n        new_value = (self._max_value - self._initial_value) / self._epoch_count * (epoch_id + 1)\n        set_dropout_probability(self._model, new_value)\n"
  },
  {
    "path": "pipeline/schedulers/dropout/utils.py",
    "content": "import abc\nfrom torch.nn.modules.dropout import _DropoutNd\n\n\ndef set_dropout_probability(module, probability):\n    if isinstance(module, _DropoutNd):\n        module.p = probability\n        return\n\n    for child in module.children():\n        set_dropout_probability(child, probability)\n"
  },
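  {
    "path": "examples/dropout_schedule_sketch.py",
    "content": "\"\"\"A minimal usage sketch for set_dropout_probability from\npipeline/schedulers/dropout/utils.py. This example file is illustrative;\nthe path and toy model are assumptions.\"\"\"\n\nfrom pipeline.schedulers.dropout.utils import set_dropout_probability\n\nimport torch.nn as nn\n\nmodel = nn.Sequential(nn.Linear(8, 8), nn.Dropout(0.5), nn.ReLU(), nn.Dropout(0.2))\n\n# Recursively overwrites p on every dropout submodule\nset_dropout_probability(model, 0.1)\nassert all(m.p == 0.1 for m in model.modules() if isinstance(m, nn.Dropout))\n"
  },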
  {
    "path": "pipeline/schedulers/learning_rate/__init__.py",
    "content": ""
  },
  {
    "path": "pipeline/schedulers/learning_rate/cyclical_lr_scheduler.py",
    "content": "from ..base import SchedulerWrapperLossBase, SchedulerWrapperMetricsMeanBase\n\nfrom torch.optim.lr_scheduler import CosineAnnealingLR\n\n\nclass SchedulerWrapperLossOnCyclic(SchedulerWrapperLossBase):\n    def __init__(self, optimizer, T_max, eta_min=0, last_epoch=-1):\n        scheduler = CosineAnnealingLR(\n            optimizer,\n            T_max=T_max,\n            eta_min=eta_min,\n            last_epoch=last_epoch,\n        )\n        super().__init__(scheduler)\n\n\nclass SchedulerWrapperMetricsMeanOnCyclic(SchedulerWrapperMetricsMeanBase):\n    def __init__(self, optimizer, T_max, eta_min=0, last_epoch=-1):\n        scheduler = CosineAnnealingLR(\n            optimizer,\n            T_max=T_max,\n            eta_min=eta_min,\n            last_epoch=last_epoch,\n        )\n        super().__init__(scheduler)"
  },
  {
    "path": "pipeline/schedulers/learning_rate/reduce_on_plateau.py",
    "content": "from ..base import SchedulerWrapperLossBase, SchedulerWrapperMetricsMeanBase\n\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\n\n\nclass SchedulerWrapperLossOnPlateau(SchedulerWrapperLossBase):\n    def __init__(self, optimizer, mode=\"min\", factor=0.5, patience=3, verbose=True, cooldown=3, min_lr=1e-8):\n        scheduler = ReduceLROnPlateau(\n            optimizer,\n            mode=mode,\n            factor=factor,\n            patience=patience,\n            verbose=verbose,\n            cooldown=cooldown,\n            min_lr=min_lr\n        )\n        super().__init__(scheduler)\n\n\nclass SchedulerWrapperMetricsMeanOnPlateau(SchedulerWrapperMetricsMeanBase):\n    def __init__(self, optimizer, mode=\"max\", factor=0.5, patience=3, verbose=True, cooldown=3, min_lr=1e-8):\n        scheduler = ReduceLROnPlateau(\n            optimizer,\n            mode=mode,\n            factor=factor,\n            patience=patience,\n            verbose=verbose,\n            cooldown=cooldown,\n            min_lr=min_lr\n        )\n        super().__init__(scheduler)\n"
  },
  {
    "path": "pipeline/storage/__init__.py",
    "content": ""
  },
  {
    "path": "pipeline/storage/predictions.py",
    "content": "from ..core import PipelineError\n\nimport abc\nimport torch\nimport os\n\n\nclass PredictionsStorageBase(abc.ABC):\n    @abc.abstractmethod\n    def add(self, identifier, prediction):\n        pass\n\n    def add_batch(self, identifiers, predictions):\n        for identifier, prediction in zip(identifiers, predictions):\n            self.add(identifier, prediction)\n\n    @abc.abstractmethod\n    def flush(self):\n        pass\n\n    @abc.abstractmethod\n    def get_all(self):\n        pass\n\n    @abc.abstractmethod\n    def get_by_id(self, identifier):\n        pass\n\n    def get_by_id_batch(self, identifiers):\n        result = []\n        for identifier in identifiers:\n            result.append(self.get_by_id(identifier))\n\n        return torch.stack(result)\n\n    @abc.abstractmethod\n    def sort_by_id(self):\n        pass\n\n\nclass PredictionsStorageFiles(PredictionsStorageBase):\n    def __init__(self, path):\n        if os.path.exists(path) and not os.path.isdir(path):\n            raise PipelineError(\"{} should be a directory\".format(path))\n\n        os.makedirs(path, exist_ok=True)\n\n        self._path = path\n\n        self._identifiers = []\n        self._predictions = []\n\n        self._identifier_to_element_id = {}\n\n        if os.path.exists(os.path.join(self._path, \"identifiers\")):\n            self._load_predictions()\n\n    def _load_predictions(self):\n        self._identifiers = torch.load(os.path.join(self._path, \"identifiers\"))\n        self._predictions = torch.load(os.path.join(self._path, \"predictions\"))\n\n        assert len(self._identifiers) == len(self._predictions)\n\n        for i, identifier in enumerate(self._identifiers):\n            self._identifier_to_element_id[identifier] = i\n\n    def _save_predictions(self):\n        assert len(self._identifiers) == len(self._predictions)\n\n        with open(os.path.join(self._path, \"identifiers\"), \"wb\") as fout:\n            torch.save(self._identifiers, fout)\n\n        with open(os.path.join(self._path, \"predictions\"), \"wb\") as fout:\n            torch.save(self._predictions, fout)\n\n    def add(self, identifier, prediction):\n        self._identifiers.append(identifier)\n        self._predictions.append(prediction)\n        self._identifier_to_element_id[identifier] = len(self._identifiers)\n\n    def flush(self):\n        self._save_predictions()\n\n    def get_all(self):\n        return self._identifiers, self._predictions\n\n    def get_by_id(self, identifier):\n        if identifier not in self._identifier_to_element_id:\n            raise PipelineError(\"Key error: {}\".format(identifier))\n\n        element_id = self._identifier_to_element_id[identifier]\n        return self._predictions[element_id]\n\n    def sort_by_id(self):\n        result = sorted(zip(self._identifiers, self._predictions), key=lambda x: x[0])\n        self._identifiers, self._predictions = list(zip(*result))\n        self.flush()\n"
  },
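  {
    "path": "examples/predictions_storage_sketch.py",
    "content": "\"\"\"A minimal usage sketch for PredictionsStorageFiles from\npipeline/storage/predictions.py. This example file is illustrative; the\npath and sample identifiers are assumptions.\"\"\"\n\nfrom pipeline.storage.predictions import PredictionsStorageFiles\n\nimport tempfile\nimport torch\n\nstorage = PredictionsStorageFiles(tempfile.mkdtemp())\n\n# add_batch pairs identifiers with prediction tensors one by one\nstorage.add_batch([2, 1], [torch.tensor([0.1, 0.9]), torch.tensor([0.8, 0.2])])\n\nstorage.sort_by_id()  # reorders by identifier and flushes to disk\nidentifiers, predictions = storage.get_all()\nassert list(identifiers) == [1, 2]\nassert torch.equal(storage.get_by_id(2), torch.tensor([0.1, 0.9]))\n"
  },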
  {
    "path": "pipeline/storage/state.py",
    "content": "from ..core import PipelineError\n\nimport abc\nimport pickle\nimport os\n\n\nclass StateStorageBase(abc.ABC):\n    @abc.abstractmethod\n    def has_key(self, key: str):\n        pass\n\n    @abc.abstractmethod\n    def get_value(self, key: str):\n        pass\n\n    @abc.abstractmethod\n    def remove_key(self, key: str):\n        pass\n\n    @abc.abstractmethod\n    def set_value(self, key: str, value: object):\n        pass\n\n\nclass StateStorageEmpty(StateStorageBase):\n    def set_value(self, key: str, value: object):\n        pass\n\n    def get_value(self, key: str):\n        raise PipelineError(\"Key error: {}\".format(key))\n\n    def has_key(self, key: str):\n        return False\n\n    def remove_key(self, key: str):\n        raise PipelineError(\"Key error: {}\".format(key))\n\n\nclass StateStorageFile(StateStorageBase):\n    def __init__(self, path: str):\n        self._path = path\n\n        if not os.path.exists(path):\n            os.makedirs(os.path.dirname(path), exist_ok=True)\n            with open(path, \"wb\") as fout:\n                pickle.dump({}, fout)\n\n        with open(path, \"rb\") as fin:\n            self._state = pickle.load(fin)\n\n    def _save(self):\n        with open(self._path, \"wb\") as fout:\n            pickle.dump(self._state, fout)\n\n    def has_key(self, key: str):\n        return key in self._state\n\n    def get_value(self, key: str):\n        if key not in self._state:\n            raise PipelineError(\"Key error: {}\".format(key))\n\n        return self._state[key]\n\n    def set_value(self, key: str, value: object):\n        self._state[key] = value\n\n        self._save()\n\n    def remove_key(self, key: str):\n        if key not in self._state:\n            raise PipelineError(\"Key error: {}\".format(key))\n\n        del self._state[key]\n\n        self._save()\n\n"
  },
  {
    "path": "pipeline/trainers/__init__.py",
    "content": ""
  },
  {
    "path": "pipeline/trainers/base.py",
    "content": "import time\nfrom typing import Iterable\n\nimport torch\nimport torch.nn as nn\nfrom torch.optim import Optimizer\n\nfrom ..core import PipelineError\nfrom ..logger import LOGGER\nfrom ..metrics.base import MetricsCalculatorBase\nfrom pipeline.schedulers.base import SchedulerWrapperMetricsMeanBase, SchedulerWrapperBase\nfrom ..storage.state import StateStorageBase\nfrom ..utils import move_to_device, save_model, load_model\n\nimport os\n\n\nclass TrainerBase:\n    def __init__(\n            self,\n            model: nn.Module,\n            train_data_loader: Iterable,\n            val_data_loader: Iterable,\n            epoch_count: int,\n            optimizer: Optimizer,\n            scheduler: SchedulerWrapperBase,\n            loss: nn.Module,\n            metrics_calculator: MetricsCalculatorBase,\n            print_frequency: None or int,\n            device: str,\n            model_save_path: str,\n            state_storage: StateStorageBase) -> None:\n\n        self.model = model.to(device)\n        self.train_data_loader = train_data_loader\n        self.val_data_loader = val_data_loader\n        self.epoch_count = epoch_count\n        self.optimizer = optimizer\n        self.scheduler = scheduler\n        self.loss = loss\n        self.metrics_calculator = metrics_calculator\n        self.print_frequency = print_frequency\n        self.device = device\n        self.model_save_path = model_save_path\n        self.state_storage = state_storage\n\n    def train_step(self, input_data: torch.Tensor, target: torch.Tensor):\n        input_data = move_to_device(input_data, device=self.device)\n        target = move_to_device(target, device=self.device)\n\n        model_output = self.model(input_data)\n\n        self.optimizer.zero_grad()\n        loss = self.loss(model_output, target)\n\n        loss.backward()\n\n        self.optimizer.step(closure=None)\n\n        return loss.cpu().data.numpy()\n\n    def predict_step(self, input_data: torch.Tensor):\n        input_data = move_to_device(input_data, device=self.device)\n        model_output = self.model(input_data)\n        return model_output\n\n    def log_train_step(self, epoch_id: int, step_id: int, epoch_time: float, loss: float, mean_loss: float):\n        if self.print_frequency is None or step_id % self.print_frequency == 0:\n            LOGGER.info(\"[{} s] Epoch {}. Train step {}. Loss {}. Mean loss {}\".format(\n                epoch_time, epoch_id, step_id, loss, mean_loss))\n            return True\n\n        return False\n\n    def log_validation_step(self, epoch_id: int, step_id: int, epoch_time: float, loss: float, mean_loss: float):\n        if self.print_frequency is None or step_id % self.print_frequency == 0:\n            LOGGER.info(\"[{} s] Epoch {}. Validation step {}. Loss {}. Mean loss {}\".format(\n                epoch_time, epoch_id, step_id, loss, mean_loss))\n\n            return True\n\n        return False\n\n    def log_train_epoch(self, epoch_id: int, epoch_time: float, mean_loss: float):\n        LOGGER.info(\"Training Epoch {} has completed. Time: {}. Mean loss: {}\".format(\n            epoch_id, epoch_time, mean_loss))\n        return True\n\n    def log_validation_epoch(self, epoch_id: int, epoch_time: float, mean_loss: float, metrics: dict):\n        LOGGER.info(\"Validation Epoch {} has completed. Time: {}. Mean loss: {}. 
Metrics: {}\".format(\n            epoch_id, epoch_time, mean_loss, str(metrics)))\n        return True\n\n    def run_train_epoch(self, epoch_id: int):\n        self.model.train()\n\n        start_time = time.time()\n        mean_loss = 0\n        step_count = 0\n\n        for step_id, (input_data, target) in enumerate(self.train_data_loader):\n            loss = self.train_step(input_data, target)\n            epoch_time = time.time() - start_time\n\n            mean_loss += loss\n            step_count += 1\n\n            self.log_train_step(epoch_id, step_id, epoch_time, loss, mean_loss / step_count)\n\n        epoch_time = time.time() - start_time\n        mean_loss /= max(step_count, 1)\n\n        self.log_train_epoch(epoch_id, epoch_time, mean_loss)\n\n        return epoch_time, mean_loss\n\n    def run_validation_epoch(self, epoch_id: int):\n        self.model.eval()\n\n        self.metrics_calculator.zero_cache()\n        mean_loss = 0\n        step_count = 0\n        start_time = time.time()\n\n        with torch.no_grad():\n            for step_id, (input_data, target) in enumerate(self.val_data_loader):\n                target = move_to_device(target, device=self.device)\n                model_output = self.predict_step(input_data)\n\n                loss = self.loss(model_output, target)\n                mean_loss += loss\n                step_count += 1\n                epoch_time = time.time() - start_time\n\n                self.metrics_calculator.add(model_output, target)\n                self.log_validation_step(epoch_id, step_id, epoch_time, loss, mean_loss / step_count)\n\n        epoch_time = time.time() - start_time\n        mean_loss /= max(step_count, 1)\n        metrics = self.metrics_calculator.calculate()\n\n        self.log_validation_epoch(epoch_id, epoch_time, mean_loss, metrics)\n\n        return epoch_time, mean_loss, metrics\n\n    def load_optimizer_state(self):\n        if not self.state_storage.has_key(\"learning_rates\"):\n            return\n\n        learning_rates = self.state_storage.get_value(\"learning_rates\")\n\n        for learning_rate, param_group in zip(learning_rates, self.optimizer.param_groups):\n            param_group[\"lr\"] = learning_rate\n\n    def save_optimizer_state(self):\n        learning_rates = []\n        for param_group in self.optimizer.param_groups:\n            learning_rates.append(float(param_group['lr']))\n\n        self.state_storage.set_value(\"learning_rates\", learning_rates)\n\n    def save_last_model(self, epoch_id):\n        os.makedirs(self.model_save_path, exist_ok=True)\n        model_path = os.path.join(self.model_save_path, \"epoch_{}\".format(epoch_id))\n        save_model(self.model, model_path)\n        LOGGER.info(\"Model was saved in {}\".format(model_path))\n\n    def load_last_model(self, epoch_id):\n        last_model_path = os.path.join(self.model_save_path, \"epoch_{}\".format(epoch_id))\n        load_model(self.model, last_model_path)\n\n    def run(self):\n        start_epoch_id = 0\n\n        if self.state_storage.has_key(\"start_epoch_id\"):\n            start_epoch_id = self.state_storage.get_value(\"start_epoch_id\")\n            try:\n                self.load_last_model(start_epoch_id - 1)\n            except:\n                LOGGER.exception(\"Exception occurs during loading a model. Starting to train a model from scratch...\")\n        else:\n            LOGGER.info(\"Model not found in {}. 
Starting to train a model from scratch...\".format(self.model_save_path))\n\n        self.load_optimizer_state()\n\n        epoch_id = start_epoch_id\n        while self.epoch_count is None or epoch_id < self.epoch_count:\n            _, mean_train_loss = self.run_train_epoch(epoch_id)\n\n            if self.val_data_loader is None:\n                if isinstance(self.scheduler, SchedulerWrapperMetricsMeanBase):\n                    raise PipelineError(\"You can't use a scheduler based on metrics without validation data\")\n                self.scheduler.step(mean_train_loss, {}, epoch_id)\n                continue\n\n            _, mean_validation_loss, validation_metrics = self.run_validation_epoch(epoch_id)\n            self.scheduler.step(mean_validation_loss, validation_metrics, epoch_id)\n\n            self.state_storage.set_value(\"start_epoch_id\", epoch_id + 1)\n            self.save_optimizer_state()\n            self.save_last_model(epoch_id)\n\n            epoch_id += 1\n"
  },
  {
    "path": "pipeline/trainers/classification.py",
    "content": "from .base import TrainerBase\n\n\nclass TrainerClassification(TrainerBase):\n    pass\n"
  },
  {
    "path": "pipeline/trainers/segmentation.py",
    "content": "from .base import TrainerBase\n\n\nclass TrainerSegmentation(TrainerBase):\n    pass\n"
  },
  {
    "path": "pipeline/utils.py",
    "content": "from .logger import setup_logger\n\nfrom torch.utils.data import DataLoader\nfrom torch.nn import DataParallel\n\nimport importlib\nimport torch\nimport os\n\n\ndef _load_cls(module_path, cls_name):\n    module_path_fixed = module_path\n    if module_path_fixed.endswith(\".py\"):\n        module_path_fixed = module_path_fixed[:-3]\n    module_path_fixed = module_path_fixed.replace(\"/\", \".\")\n    module = importlib.import_module(module_path_fixed)\n    assert hasattr(module, cls_name), \"{} file should contain {} class\".format(module_path, cls_name)\n\n    cls = getattr(module, cls_name)\n    return cls\n\n\ndef load_config(config_path: str):\n    return _load_cls(config_path, \"Config\")()\n\n\ndef load_predict_config(config_path: str):\n    return _load_cls(config_path, \"PredictConfig\")()\n\n\ndef move_to_device(tensor: list or tuple or torch.Tensor, device: str):\n    if isinstance(tensor, list):\n        return [move_to_device(elem, device=device) for elem in tensor]\n    if isinstance(tensor, tuple):\n        return (move_to_device(elem, device=device) for elem in tensor)\n    return tensor.to(device)\n\n\ndef get_path(path):\n    return os.path.expanduser(path)\n\n\ndef save_model(model, path):\n    if isinstance(model, DataParallel):\n        model = model.module\n\n    with open(path, \"wb\") as fout:\n        torch.save(model.state_dict(), fout)\n\n\ndef load_model(model, path):\n    with open(path, \"rb\") as fin:\n        state_dict = torch.load(fin)\n\n    model.load_state_dict(state_dict)\n\n\ndef run_train(config):\n    train_data_loader = DataLoader(\n        config.train_dataset,\n        batch_size=config.batch_size,\n        shuffle=True,\n        pin_memory=True,\n        num_workers=config.num_workers)\n\n    val_data_loader = DataLoader(\n        config.val_dataset,\n        batch_size=config.batch_size,\n        shuffle=False,\n        num_workers=config.num_workers)\n\n    model = config.model\n\n    model_save_path = config.model_save_path\n    os.makedirs(model_save_path, exist_ok=True)\n\n    logger_path = os.path.join(model_save_path, \"log.txt\")\n    setup_logger(out_file=logger_path)\n\n    trainer = config.trainer_cls(\n        model=model,\n        train_data_loader=train_data_loader,\n        val_data_loader=val_data_loader,\n        epoch_count=config.epoch_count,\n        optimizer=config.optimizer,\n        scheduler=config.scheduler,\n        loss=config.loss,\n        metrics_calculator=config.metrics_calculator,\n        print_frequency=config.print_frequency,\n        device=config.device,\n        model_save_path=config.model_save_path,\n        state_storage=config.state_storage\n    )\n\n    trainer.run()\n\n\ndef run_predict(config):\n    data_loader = DataLoader(\n        config.dataset,\n        batch_size=config.batch_size,\n        shuffle=False,\n        pin_memory=True,\n        num_workers=config.num_workers)\n\n    model = config.model\n\n    model_save_path = config.model_save_path\n    assert os.path.exists(model_save_path), \"{} does not exist\".format(model_save_path)\n\n    logger_path = os.path.join(model_save_path, \"log_predict.txt\")\n    setup_logger(out_file=logger_path)\n\n    predictor = config.predictor_cls(\n        model=model,\n        data_loader=data_loader,\n        print_frequency=config.print_frequency,\n        device=config.device,\n        model_save_path=model_save_path,\n        predictions_storage=config.predictions_storage)\n\n    predictor.run()\n"
  },
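  {
    "path": "examples/config_sketch.py",
    "content": "\"\"\"A minimal sketch of a training config consumed by run_train in\npipeline/utils.py. Every attribute below mirrors a field run_train reads;\nthe toy model, datasets and save path are illustrative assumptions.\"\"\"\n\nfrom pipeline.metrics.base import MetricsCalculatorEmpty\nfrom pipeline.schedulers.base import SchedulerWrapperIdentity\nfrom pipeline.storage.state import StateStorageEmpty\nfrom pipeline.trainers.classification import TrainerClassification\n\nfrom torch.optim import Adam\nfrom torch.utils.data import TensorDataset\nimport torch\nimport torch.nn as nn\n\n\nclass Config:\n    def __init__(self):\n        self.model = nn.Linear(4, 2)\n        self.train_dataset = TensorDataset(torch.randn(16, 4), torch.randint(0, 2, (16,)))\n        self.val_dataset = TensorDataset(torch.randn(8, 4), torch.randint(0, 2, (8,)))\n        self.batch_size = 8\n        self.num_workers = 0\n        self.epoch_count = 1\n        self.optimizer = Adam(self.model.parameters(), lr=1e-3)\n        self.scheduler = SchedulerWrapperIdentity()\n        self.loss = nn.CrossEntropyLoss()\n        self.metrics_calculator = MetricsCalculatorEmpty()\n        self.print_frequency = None\n        self.device = \"cpu\"\n        self.model_save_path = \"/tmp/pipeline_config_sketch\"\n        self.state_storage = StateStorageEmpty()\n        self.trainer_cls = TrainerClassification\n"
  },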
  {
    "path": "requirements.txt",
    "content": "torch>=1.0.0\npandas\nnumpy\ntorchvision\nscikit-learn\nPillow\n"
  },
  {
    "path": "tests/__init__.py",
    "content": ""
  },
  {
    "path": "tests/common.py",
    "content": "import tempfile\nimport os\n\n\ndef make_temp_path():\n    _, path = tempfile.mkstemp()\n    os.remove(path)\n    return path\n"
  },
  {
    "path": "tests/test_metrics.py",
    "content": "from pipeline.metrics.accuracy import MetricsCalculatorAccuracy\nfrom pipeline.core import PipelineError\n\n\nimport pytest\n\n\nclass TestClassificationMetrics:\n    def test_accuracy(self):\n        metrics_calculator = MetricsCalculatorAccuracy(border=0.4)\n\n        with pytest.raises(PipelineError):\n            metrics_calculator.calculate()\n\n"
  },
  {
    "path": "tests/test_schedulers.py",
    "content": "from pipeline.schedulers.learning_rate.reduce_on_plateau import SchedulerWrapperLossOnPlateau, SchedulerWrapperMetricsMeanOnPlateau\n\nfrom torch.optim import Adam\nimport torch.nn as nn\n\n\nclass TestReduceLROnPlateau:\n    def test_wrapper_loss(self):\n        first_layer = nn.Linear(10, 5)\n        second_layer = nn.Linear(5, 1)\n\n        optimizer = Adam([{\"params\": first_layer.parameters(), \"lr\": 1},\n                          {\"params\": second_layer.parameters(), \"lr\": 2}])\n        scheduler = SchedulerWrapperLossOnPlateau(optimizer, factor=0.5, patience=1, min_lr=0.1, cooldown=2)\n\n        assert optimizer.param_groups[0][\"lr\"] == 1\n        assert optimizer.param_groups[1][\"lr\"] == 2\n\n        scheduler.step(loss=10, metrics={\"a\": 5}, epoch_id=0)\n        assert optimizer.param_groups[0][\"lr\"] == 1\n        assert optimizer.param_groups[1][\"lr\"] == 2\n\n        scheduler.step(loss=11, metrics={\"a\": 3}, epoch_id=1)\n        assert optimizer.param_groups[0][\"lr\"] == 1\n        assert optimizer.param_groups[1][\"lr\"] == 2\n\n        scheduler.step(loss=12, metrics={\"a\": 1}, epoch_id=2)\n        assert optimizer.param_groups[0][\"lr\"] == 0.5\n        assert optimizer.param_groups[1][\"lr\"] == 1\n\n        scheduler.step(loss=13, metrics={\"a\": 2}, epoch_id=3)\n        scheduler.step(loss=14, metrics={\"a\": 5}, epoch_id=4)\n        scheduler.step(loss=14, metrics={\"a\": 2}, epoch_id=5)\n\n        assert optimizer.param_groups[0][\"lr\"] == 0.5\n        assert optimizer.param_groups[1][\"lr\"] == 1\n\n        scheduler.step(loss=14, metrics={\"a\": 100}, epoch_id=6)\n        assert optimizer.param_groups[0][\"lr\"] == 0.25\n        assert optimizer.param_groups[1][\"lr\"] == 0.5\n\n        scheduler.step(loss=9, metrics={\"a\": 21}, epoch_id=7)\n        scheduler.step(loss=8, metrics={\"a\": 21}, epoch_id=7)\n\n        assert optimizer.param_groups[0][\"lr\"] == 0.25\n        assert optimizer.param_groups[1][\"lr\"] == 0.5\n\n        scheduler.step(loss=13, metrics={\"a\": 3}, epoch_id=8)\n\n        assert optimizer.param_groups[0][\"lr\"] == 0.25\n        assert optimizer.param_groups[1][\"lr\"] == 0.5\n\n        scheduler.step(loss=14, metrics=None, epoch_id=9)\n\n        assert optimizer.param_groups[0][\"lr\"] == 0.125\n        assert optimizer.param_groups[1][\"lr\"] == 0.25\n\n        for epoch_id in range(10, 30):\n            scheduler.step(loss=14, metrics={\"absd\": \"asdasd\"}, epoch_id=epoch_id)\n\n        assert optimizer.param_groups[0][\"lr\"] == 0.1\n        assert optimizer.param_groups[1][\"lr\"] == 0.1\n\n    def test_wrapper_metrics(self):\n        model = nn.Linear(10, 1)\n\n        optimizer = Adam(model.parameters(), lr=1)\n        scheduler = SchedulerWrapperMetricsMeanOnPlateau(optimizer, factor=0.5, patience=0, min_lr=0.1, cooldown=0)\n\n        assert optimizer.param_groups[0][\"lr\"] == 1\n\n        scheduler.step(loss=None, metrics={\"a\": 1, \"b\": 1}, epoch_id=0)\n        assert optimizer.param_groups[0][\"lr\"] == 1\n\n        scheduler.step(loss=\"abacaba\", metrics={\"a\": 1, \"b\": 0}, epoch_id=1)\n        scheduler.step(loss=-10, metrics={\"a\": 1, \"b\": 1}, epoch_id=2)\n        assert optimizer.param_groups[0][\"lr\"] == 0.25\n\n        scheduler.step(loss=123, metrics={\"a\": 1, \"b\": 2}, epoch_id=3)\n        assert optimizer.param_groups[0][\"lr\"] == 0.25\n\n        scheduler.step(loss=0, metrics={\"a\": 2}, epoch_id=4)\n        assert optimizer.param_groups[0][\"lr\"] == 0.25\n\n        
scheduler.step(loss=0, metrics={\"aasda\": 1.1}, epoch_id=5)\n        assert optimizer.param_groups[0][\"lr\"] == 0.125\n\n        for epoch_id in range(6, 20):\n            scheduler.step(loss=0, metrics={\"c\": 1}, epoch_id=epoch_id)\n            assert optimizer.param_groups[0][\"lr\"] == 0.1\n"
  },
  {
    "path": "tests/test_storage.py",
    "content": "from .common import make_temp_path\n\nfrom pipeline.storage.state import StateStorageEmpty, StateStorageFile\nfrom pipeline.core import PipelineError\n\nimport pytest\n\n\nclass TestStateStorageEmpty:\n    def test_set_value(self):\n        state_storage = StateStorageEmpty()\n        state_storage.set_value(\"key_name\", 123)\n\n    def test_get_value(self):\n        state_storage = StateStorageEmpty()\n\n        with pytest.raises(PipelineError):\n            state_storage.get_value(\"some_key\")\n\n        state_storage.set_value(\"some_key\", 123)\n        with pytest.raises(PipelineError):\n            state_storage.get_value(\"some_key\")\n\n    def test_has_key(self):\n        state_storage = StateStorageEmpty()\n\n        assert not state_storage.has_key(\"key\")\n        state_storage.set_value(\"key\", \"abacaba\")\n\n        assert not state_storage.has_key(\"key\")\n\n    def test_remove_key(self):\n        state_storage = StateStorageEmpty()\n\n        with pytest.raises(PipelineError):\n            state_storage.remove_key(\"abacaba\")\n\n        state_storage.set_value(\"abacaba\", 9.23)\n        with pytest.raises(PipelineError):\n            state_storage.remove_key(\"abacaba\")\n\n\nclass TestStateStorageFile:\n    def test_basic(self):\n        path = make_temp_path()\n        state_storage = StateStorageFile(path)\n\n        assert not state_storage.has_key(\"key\")\n\n        with pytest.raises(PipelineError):\n            state_storage.remove_key(\"abacaba\")\n\n        with pytest.raises(PipelineError):\n            state_storage.get_value(\"some_key\")\n\n    def test_save_load(self):\n        path = make_temp_path()\n        state_storage = StateStorageFile(path)\n\n        state_storage.set_value(\"aba\", 123)\n        assert state_storage.get_value(\"aba\") == 123\n        assert state_storage.has_key(\"aba\")\n\n        state_storage = StateStorageFile(path)\n        assert state_storage.get_value(\"aba\") == 123\n        assert state_storage.has_key(\"aba\")\n\n        state_storage.remove_key(\"aba\")\n        assert not state_storage.has_key(\"aba\")\n\n        state_storage = StateStorageFile(path)\n        assert not state_storage.has_key(\"aba\")\n"
  }
]