[
  {
    "path": ".idea/.gitignore",
    "content": "# Default ignored files\n/shelf/\n/workspace.xml\n# Editor-based HTTP Client requests\n/httpRequests/\n# Datasource local storage ignored files\n/dataSources/\n/dataSources.local.xml\n"
  },
  {
    "path": ".idea/deployment.xml",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<project version=\"4\">\n  <component name=\"PublishConfigData\" remoteFilesAllowedToDisappearOnAutoupload=\"false\">\n    <serverData>\n      <paths name=\"hm@222.201.187.49:22\">\n        <serverdata>\n          <mappings>\n            <mapping local=\"$PROJECT_DIR$\" web=\"/\" />\n          </mappings>\n        </serverdata>\n      </paths>\n      <paths name=\"lz@116.56.134.138:22\">\n        <serverdata>\n          <mappings>\n            <mapping local=\"$PROJECT_DIR$\" web=\"/\" />\n          </mappings>\n        </serverdata>\n      </paths>\n      <paths name=\"lz@202.38.247.104:22\">\n        <serverdata>\n          <mappings>\n            <mapping local=\"$PROJECT_DIR$\" web=\"/\" />\n          </mappings>\n        </serverdata>\n      </paths>\n      <paths name=\"lz@202.38.247.12:22\">\n        <serverdata>\n          <mappings>\n            <mapping local=\"$PROJECT_DIR$\" web=\"/\" />\n          </mappings>\n        </serverdata>\n      </paths>\n      <paths name=\"lz@202.38.247.12:22 (2)\">\n        <serverdata>\n          <mappings>\n            <mapping local=\"$PROJECT_DIR$\" web=\"/\" />\n          </mappings>\n        </serverdata>\n      </paths>\n      <paths name=\"lz@222.201.144.244:22\">\n        <serverdata>\n          <mappings>\n            <mapping local=\"$PROJECT_DIR$\" web=\"/\" />\n          </mappings>\n        </serverdata>\n      </paths>\n      <paths name=\"lz@222.201.144.245:22\">\n        <serverdata>\n          <mappings>\n            <mapping local=\"$PROJECT_DIR$\" web=\"/\" />\n          </mappings>\n        </serverdata>\n      </paths>\n      <paths name=\"lz@222.201.145.184:22\">\n        <serverdata>\n          <mappings>\n            <mapping local=\"$PROJECT_DIR$\" web=\"/\" />\n          </mappings>\n        </serverdata>\n      </paths>\n      <paths name=\"lz@5560n6l068.oicp.vip:38981\">\n        <serverdata>\n          <mappings>\n            <mapping 
local=\"$PROJECT_DIR$\" web=\"/\" />\n          </mappings>\n        </serverdata>\n      </paths>\n      <paths name=\"lz@5560n6l068.oicp.vip:53987\">\n        <serverdata>\n          <mappings>\n            <mapping local=\"$PROJECT_DIR$\" web=\"/\" />\n          </mappings>\n        </serverdata>\n      </paths>\n      <paths name=\"lz_theta@222.201.145.184:22\">\n        <serverdata>\n          <mappings>\n            <mapping local=\"$PROJECT_DIR$\" web=\"/\" />\n          </mappings>\n        </serverdata>\n      </paths>\n    </serverData>\n  </component>\n</project>"
  },
  {
    "path": ".idea/inspectionProfiles/Project_Default.xml",
    "content": "<component name=\"InspectionProjectProfileManager\">\n  <profile version=\"1.0\">\n    <option name=\"myName\" value=\"Project Default\" />\n    <inspection_tool class=\"PyInterpreterInspection\" enabled=\"false\" level=\"WARNING\" enabled_by_default=\"false\" />\n    <inspection_tool class=\"PyPackageRequirementsInspection\" enabled=\"true\" level=\"WARNING\" enabled_by_default=\"true\">\n      <option name=\"ignoredPackages\">\n        <value>\n          <list size=\"4\">\n            <item index=\"0\" class=\"java.lang.String\" itemvalue=\"scienceplots\" />\n            <item index=\"1\" class=\"java.lang.String\" itemvalue=\"latextable\" />\n            <item index=\"2\" class=\"java.lang.String\" itemvalue=\"texttable\" />\n            <item index=\"3\" class=\"java.lang.String\" itemvalue=\"overrides\" />\n          </list>\n        </value>\n      </option>\n    </inspection_tool>\n  </profile>\n</component>"
  },
  {
    "path": ".idea/inspectionProfiles/profiles_settings.xml",
    "content": "<component name=\"InspectionProjectProfileManager\">\n  <settings>\n    <option name=\"USE_PROJECT_PROFILE\" value=\"false\" />\n    <version value=\"1.0\" />\n  </settings>\n</component>"
  },
  {
    "path": ".idea/modules.xml",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<project version=\"4\">\n  <component name=\"ProjectModuleManager\">\n    <modules>\n      <module fileurl=\"file://$PROJECT_DIR$/.idea/time-series-ptms.iml\" filepath=\"$PROJECT_DIR$/.idea/time-series-ptms.iml\" />\n    </modules>\n  </component>\n</project>"
  },
  {
    "path": ".idea/time-series-ptms.iml",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<module type=\"PYTHON_MODULE\" version=\"4\">\n  <component name=\"NewModuleRootManager\">\n    <content url=\"file://$MODULE_DIR$\" />\n    <orderEntry type=\"inheritedJdk\" />\n    <orderEntry type=\"sourceFolder\" forTests=\"false\" />\n  </component>\n  <component name=\"PyDocumentationSettings\">\n    <option name=\"format\" value=\"GOOGLE\" />\n    <option name=\"myDocStringFormat\" value=\"Google\" />\n  </component>\n</module>"
  },
  {
    "path": ".idea/vcs.xml",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<project version=\"4\">\n  <component name=\"VcsDirectoryMappings\">\n    <mapping directory=\"$PROJECT_DIR$\" vcs=\"Git\" />\n  </component>\n</project>"
  },
  {
    "path": "README.md",
    "content": "# [A Survey on Time-Series Pre-Trained Models](https://arxiv.org/pdf/2305.10716v2)\n\nThis is the training code for our paper *\"[A Survey on Time-Series Pre-Trained Models](https://arxiv.org/pdf/2305.10716v2)\"*, which has been accepted for publication in the IEEE Transactions on Knowledge and Data Engineering (TKDE-24).\n\n## Overview\n\nTime-Series Mining (TSM) is an important research area since it shows great potential in practical applications. Deep learning models that rely on massive labeled data have been utilized for TSM successfully. However, constructing a large-scale well-labeled dataset is difficult due to data annotation costs. \nRecently, pre-trained models have gradually attracted attention in the time series domain due to their remarkable performance in computer vision and natural language processing. In this survey, we provide a comprehensive review of Time-Series Pre-Trained Models (TS-PTMs), aiming to guide the understanding, applying, and studying TS-PTMs. \nSpecifically, we first briefly introduce the typical deep learning models employed in TSM. Then, we give an overview of TS-PTMs according to the pre-training techniques. The main categories we explore include supervised, unsupervised, and self-supervised TS-PTMs.\nFurther, extensive experiments involving  27 methods, 434 datasets, and 679 transfer learning scenarios are conducted to analyze the advantages and disadvantages of transfer learning strategies, Transformer-based models, and representative TS-PTMs. 
Finally, we point out some potential directions of TS-PTMs for future work.\n\n<p align=\"center\">\n    <img src=\"pictures/framework.jpg\" width=\"1000\" align=\"center\">\n</p>\n\n\n## Datasets\nThe datasets used in this project are as follows:\n### Time-Series Classification\n* [128 UCR datasets](https://www.cs.ucr.edu/~eamonn/time_series_data_2018/UCRArchive_2018.zip)\n* [30 UEA datasets](http://www.timeseriesclassification.com/Downloads/Archives/Multivariate2018_arff.zip)\n* [SleepEEG dataset](https://www.physionet.org/content/sleep-edfx/1.0.0/) \n* [Epilepsy dataset](https://repositori.upf.edu/handle/10230/42894) \n* [FD-A and FD-B datasets](https://mb.uni-paderborn.de/en/kat/main-research/datacenter/bearing-datacenter/data-sets-and-download) \n* [HAR dataset](https://archive.ics.uci.edu/ml/datasets/Human+Activity+Recognition+Using+Smartphones) \n* [Gesture dataset](http://www.timeseriesclassification.com/description.php?Dataset=UWaveGestureLibrary) \n* [ECG dataset](https://physionet.org/content/challenge-2017/1.0.0/) \n* [EMG dataset](https://physionet.org/content/emgdb/1.0.0/) \n\n### Time-Series Forecasting\n* [ETDataset (including 4 datasets)](https://archive.ics.uci.edu/ml/datasets/ElectricityLoadDiagrams20112014)\n* [Electricity](https://archive.ics.uci.edu/ml/datasets/ElectricityLoadDiagrams20112014)\n* [Traffic](http://pems.dot.ca.gov)\n* [Weather](https://www.bgc-jena.mpg.de/wetter)\n* [Exchange](https://github.com/laiguokun/multivariate-time-series-data)\n* [ILI](https://gis.cdc.gov/grasp/fluview/fluportaldashboard.html)\n\n### Time-Series Anomaly Detection\n* [Yahoo dataset](https://webscope.sandbox.yahoo.com/catalog.php?datatype=s&did=70) \n* [KPI dataset](http://test-10056879.file.myqcloud.com/10056879/test/20180524_78431960010324/KPI%E5%BC%82%E5%B8%B8%E6%A3%80%E6%B5%8B%E5%86%B3%E8%B5%9B%E6%95%B0%E6%8D%AE%E9%9B%86.zip)\n* [250 UCR anomaly detection 
datasets](https://wu.renjie.im/research/anomaly-benchmarks-are-flawed/#ucr-time-series-anomaly-archiv) \n* [MSL dataset](https://github.com/khundman/telemanom) \n* [SMAP dataset](https://github.com/khundman/telemanom) \n* [PSM dataset](https://github.com/eBay/RANSynCoders) \n* [SMD dataset](https://github.com/NetManAIOps/OmniAnomaly) \n* [SWaT dataset](https://itrust.sutd.edu.sg/itrust-labs_datasets/dataset_info/#swat) \n* [NIPS-TS-SWAN dataset](https://github.com/datamllab/tods/tree/benchmark/benchmark) \n* [NIPS-TS-GECCO dataset](https://github.com/datamllab/tods/tree/benchmark/benchmark) \n\n\n\n## Pre-Trained Models on Time Series Classification\n- [x] [FCN](https://github.com/cauchyturing/UCR_Time_Series_Classification_Deep_Learning_Baseline)\n- [x] [FCN Encoder+CNN Decoder](https://github.com/qianlima-lab/time-series-ptms/blob/master/ts_classification_methods/model/tsm_model.py)\n- [x] [FCN Encoder+RNN Decoder](https://github.com/qianlima-lab/time-series-ptms/blob/master/ts_classification_methods/model/tsm_model.py)\n- [x] [TCN](https://github.com/White-Link/UnsupervisedScalableRepresentationLearningTimeSeries)\n- [x] [Transformer](https://github.com/gzerveas/mvts_transformer)\n- [x] [TST](https://github.com/gzerveas/mvts_transformer)\n- [x] [T-Loss](https://github.com/White-Link/UnsupervisedScalableRepresentationLearningTimeSeries)\n- [x] [SelfTime](https://github.com/haoyfan/SelfTime)\n- [x] [TS-TCC](https://github.com/emadeldeen24/TS-TCC)\n- [x] [TS2Vec](https://github.com/zhihanyue/ts2vec)\n- [x] [TimesNet](https://github.com/thuml/TimesNet)\n- [x] [PatchTST](https://github.com/yuqinie98/PatchTST)\n- [x] [GPT4TS](https://github.com/DAMO-DI-ML/NeurIPS2023-One-Fits-All)\n\nFor details, please refer to [ts_classification_methods/README](https://github.com/qianlima-lab/time-series-ptms/blob/master/ts_classification_methods/README.md).\n\n## Pre-Trained Models on Time Series Forecasting\n\n- [x] [LogTrans](https://github.com/AIStream-Peelout/flow-forecast)\n- 
[x] [TCN](https://github.com/locuslab/TCN)\n- [x] [Informer](https://github.com/zhouhaoyi/Informer2020)\n- [x] [Autoformer](https://github.com/thuml/autoformer)\n- [x] [TS2Vec](https://github.com/zhihanyue/ts2vec)\n- [x] [CoST](https://github.com/salesforce/CoST)\n- [x] [TimesNet](https://github.com/thuml/TimesNet)\n- [x] [PatchTST](https://github.com/yuqinie98/PatchTST)\n- [x] [DLinear](https://github.com/vivva/DLinear)\n- [x] [GPT4TS](https://github.com/DAMO-DI-ML/NeurIPS2023-One-Fits-All)\n- [x] [TEMPO](https://github.com/DC-research/TEMPO)\n- [x] [iTransformer](https://github.com/thuml/iTransformer)\n\nFor details, please refer to [ts_forecasting_methods/README](https://github.com/qianlima-lab/transfer-to-transformer-tsm/blob/master/ts_forecasting_methods/README.md).\n\n## Pre-Trained Models on Time Series Anomaly Detection\n\n- [x] [SPOT](https://github.com/limjcst/ads-evt)\n- [x] [DSPOT](https://github.com/limjcst/ads-evt)\n- [x] [LSTM-VAE](https://github.com/SchindlerLiang/VAE-for-Anomaly-Detection)\n- [x] [DONUT](https://github.com/NetManAIOps/donut)\n- [x] [Spectral Residual (SR)](https://dl.acm.org/doi/10.1145/3292500.3330680)\n- [x] [Anomaly Transformer (AT)](https://github.com/spencerbraun/anomaly_transformer_pytorch)\n- [x] [TS2Vec](https://github.com/zhihanyue/ts2vec)\n- [x] [TimesNet](https://github.com/thuml/TimesNet)\n- [x] [GPT4TS](https://github.com/DAMO-DI-ML/NeurIPS2023-One-Fits-All)\n- [x] [DCdetector](https://github.com/DAMO-DI-ML/KDD2023-DCdetector)\n\nFor details, please refer to [ts_anomaly_detection_methods/README](https://github.com/qianlima-lab/transfer-to-transformer-tsm/blob/master/ts_anomaly_detection_methods/README.md).\n\n## Acknowledgments\nWe thank the anonymous reviewers for their helpful feedback. We thank Professor **Eamonn Keogh** from UCR and all the people who have contributed to the UCR\\&UEA time series archives and other time series datasets. The authors would like to thank \nProfessor **Garrison W. 
Cottrell** from UCSD, and **Chuxin Chen**, **Xidi Cai**, **Yu Chen**, and **Peitian Ma** from SCUT for the helpful suggestions. \n\n## Citation\nIf you use this code for your research, please cite our paper:\n```\n@article{ma2024survey,\n  title={A survey on time-series pre-trained models},\n  author={Ma, Qianli and Liu, Zhen and Zheng, Zhenjing and Huang, Ziyang and Zhu, Siying and Yu, Zhongzhong and Kwok, James T},\n  journal={IEEE Transactions on Knowledge and Data Engineering},\n  year={2024}\n}\n```\n\n"
  },
  {
    "path": "ts_anomaly_detection_methods/README.md",
    "content": "This is the time-series anomaly detection training code for our paper *\"A Survey on Time-Series Pre-Trained Models\"*\n\n## Baselines\n\n|  ID  |                            Method                            | Year |   Press   |                         Source Code                          |\n| :--: | :----------------------------------------------------------: | :--: | :-------: | :----------------------------------------------------------: |\n|  1   |  [SPOT](https://dl.acm.org/doi/abs/10.1145/3097983.3098144)  | 2017 |    KDD    |     [github_link](https://github.com/Amossys-team/SPOT)      |\n|  2   | [DSPOT](https://dl.acm.org/doi/abs/10.1145/3097983.3098144)  | 2017 |    KDD    |     [github_link](https://github.com/Amossys-team/SPOT)      |\n|  3   | [LSTM-VAE](https://ieeexplore.ieee.org/abstract/document/8279425) | 2018 | IEEE RA.L | [github_link](https://github.com/SchindlerLiang/VAE-for-Anomaly-Detection) |\n|  4   | [DONUT](https://dl.acm.org/doi/abs/10.1145/3178876.3185996)  | 2018 |    WWW    |     [github_link](https://github.com/NetManAIOps/donut)      |\n|  5   |  [Spectral Residual (SR)*](https://dl.acm.org/doi/abs/10.1145/3292500.3330680)   | 2019 |    KDD    |                              -                               |\n|  6   |            [Anomaly Transformer (AT)](https://arxiv.org/abs/2110.02642)            | 2022 |   ICLR    | [github_link](https://github.com/spencerbraun/anomaly_transformer_pytorch) |\n|  7   | [TS2Vec](https://www.aaai.org/AAAI22Papers/AAAI-8809.YueZ.pdf) | 2022 |   AAAI    |      [github_link](https://github.com/yuezhihan/ts2vec)      |\n|  8   | [TimesNet](https://openreview.net/pdf?id=ju_Uqw384Oq) | 2023 |   ICLR    |      [github_link](https://github.com/thuml/TimesNet)      |\n|  9   | [GPT4TS](https://arxiv.org/abs/2302.11939) | 2023 |   NeurIPS    |      [github_link](https://github.com/DAMO-DI-ML/NeurIPS2023-One-Fits-All)      |\n|  10   | [DCdetector](https://arxiv.org/abs/2306.10347) | 2023 |   
KDD    |      [github_link](https://github.com/DAMO-DI-ML/KDD2023-DCdetector)      |\n\n\nFor details, please refer to [ts_anomaly_detection_methods/other_anomaly_baselines/README](https://github.com/qianlima-lab/time-series-ptms/blob/master/ts_anomaly_detection_methods/other_anomaly_baselines/README.md)\n"
  },
  {
    "path": "ts_anomaly_detection_methods/anomaly_transformer/ATmodelbatch.py",
    "content": "import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport pdb\nimport numpy as np\nfrom utils import data_slice,split_N_pad\nimport time\nfrom torch.utils.data import DataLoader, TensorDataset, SequentialSampler\n\nif torch.cuda.is_available():\n    torch.set_default_tensor_type('torch.cuda.DoubleTensor')\nelse:\n    torch.set_default_tensor_type('torch.DoubleTensor')\n\nclass AnomalyAttention(nn.Module):\n    def __init__(self, N, d_model):\n        super(AnomalyAttention, self).__init__()\n        self.d_model = d_model\n        self.N = N\n\n        self.Wq = nn.Linear(d_model, d_model, bias=False)\n        self.Wk = nn.Linear(d_model, d_model, bias=False)\n        self.Wv = nn.Linear(d_model, d_model, bias=False)\n        self.Ws = nn.Linear(d_model, 1, bias=False)\n        self.Q = self.K = self.V = self.sigma = torch.zeros((N, d_model))\n        self.P = torch.zeros((N, N))\n        self.S = torch.zeros((N, N))\n\n    def forward(self, x):\n        #x :[batch,N,d_model]\n        self.initialize(x)\n        self.S = self.series_association()\n        self.P = self.prior_association()\n        Z = self.reconstruction()\n        return Z\n\n    def initialize(self, x):\n        self.Q = self.Wq(x)\n        self.K = self.Wk(x)\n        self.V = self.Wv(x)\n        self.sigma = self.Ws(x)\n\n    @staticmethod\n    def gaussian_kernel(mean, sigma):\n        normalize = 1 / (math.sqrt(2 * torch.pi) * torch.abs(sigma))\n        return normalize * torch.exp(-0.5 * (mean / sigma).pow(2))\n\n    def prior_association(self):\n        # qwe = torch.from_numpy(\n        #     np.abs(np.indices((self.N, self.N))[0] - np.indices((self.N, self.N))[1])\n        # ).cuda\n        qwe = torch.from_numpy(\n            np.abs(np.indices((self.N, self.N))[0] - np.indices((self.N, self.N))[1])\n        )\n        if torch.cuda.is_available():\n            qwe = qwe.cuda()\n        #原 gaussian: [batch,N,N]\n        #因为是高斯所以这里行列求和都一样\n 
       gaussian = self.gaussian_kernel(qwe.double(), self.sigma)\n        gaussian /= gaussian.sum(dim=-1).view(-1,self.N,1)\n        return gaussian\n\n    def series_association(self):\n        # 原 [N,N]\n        # return F.softmax(self.Q @ self.K.T / math.sqrt(self.d_model), dim=0)\n        # 现 [batch,N,N],是列方向的softmax？,应该是不对的，得改成行方向的softmax，根据下游的reconstruction来看\n        return F.softmax(torch.matmul(self.Q,self.K.transpose(1,2)) / math.sqrt(self.d_model), dim=2)\n\n    def reconstruction(self):\n        return torch.matmul(self.S,self.V)\n\nclass AnomalyTransformerBlock(nn.Module):\n    def __init__(self, N, d_model):\n        super().__init__()\n        self.N, self.d_model = N, d_model\n\n        self.attention = AnomalyAttention(self.N, self.d_model)\n        self.ln1 = nn.LayerNorm(self.d_model)\n        self.ff = nn.Sequential(nn.Linear(self.d_model, self.d_model), nn.ReLU())\n        self.ln2 = nn.LayerNorm(self.d_model)\n\n    def forward(self, x):\n        # x: [batch,N,d_model]\n        x_identity = x\n        x = self.attention(x)\n        z = self.ln1(x + x_identity)\n        z_identity = z\n        z = self.ff(z)\n        z = self.ln2(z + z_identity)\n        \n        # z: [batch,N,d_model]\n        return z\n\nclass AnomalyTransformer(nn.Module):\n    def __init__(self,batch_size, N, in_channel, d_model, layers, lambda_):\n        super().__init__()\n        self.batch_size = batch_size\n        self.in_channel = in_channel\n        self.N = N\n        self.d_model = d_model\n\n        self.input2hidden = nn.Linear(self.in_channel,self.d_model)\n        self.hidden2output = nn.Linear(self.d_model,self.in_channel)\n        self.blocks = nn.ModuleList(\n            [AnomalyTransformerBlock(self.N, self.d_model) for _ in range(layers)]\n        )\n        self.output = None\n        self.lambda_ = lambda_\n\n        self.P_layers = []\n        self.S_layers = []\n    def to_string(self):\n        return 'in_channel:%d_N:%d_dmodel:%d_' % 
(self.in_channel,self.N,self.d_model)\n\n    def forward(self, x):\n        \n        # x: [batch,N,in_channel]\n        self.P_layers = []\n        self.S_layers = []\n        x = self.input2hidden(x)\n        for idx, block in enumerate(self.blocks):\n            x = block(x)\n            # x: [batch,N,d_model]\n            self.P_layers.append(block.attention.P)\n            self.S_layers.append(block.attention.S)\n        self.output = self.hidden2output(x)\n        # output: [batch,N,in_channel]\n        return self.output\n    \n    # def layer_association_discrepancy(self, Pl, Sl, x):\n    #     rowwise_kl = lambda row: (\n    #         F.kl_div(Pl[row, :], Sl[row, :]) + F.kl_div(Sl[row, :], Pl[row, :])\n    #     )\n    #     ad_vector = torch.concat(\n    #         [rowwise_kl(row).unsqueeze(0) for row in range(Pl.shape[0])]\n    #     )\n    #     return ad_vector\n    # ad_vector: [N]\n    \n    # def rowwise_kl (self,Pl,Sl,idx,row):\n    #     return F.kl_div(Pl[idx,row, :], Sl[idx,row, :]) + F.kl_div(Sl[idx,row, :], Pl[idx,row, :])\n    # def layer_association_discrepancy(self, Pl, Sl, x):\n        \n    #     wholetmp=[]\n    #     for idx in range(Pl.shape[0]):\n    #         rowtmp=[]\n    #         for row in range(Pl.shape[1]):\n    #             rowtmp.append(self.rowwise_kl(Pl,Sl,idx,row).unsqueeze(0))\n    #         wholetmp.append(torch.cat(rowtmp))\n                \n    #     ad_vector = torch.cat( \n    #         wholetmp\n    #     ).reshape([-1,Pl.shape[1]])\n    #     #ad_vector: [batch,N]\n    #     return ad_vector\n    \n    def rowwise_kl(self, row, Pl, Sl, eps=1e-4):\n        Pl_r = Pl[:,row,:]\n        Sl_r = Sl[:,row,:]\n        Pl_r = (Pl_r+ eps) / torch.sum(Pl_r + eps, dim=-1, keepdims=True)\n        Sl_r = (Sl_r + eps) / torch.sum(Sl_r+ eps, dim=-1, keepdims=True)\n        '''TODO:改这个函数'''\n        ret = torch.sum( \n            F.kl_div( torch.log(Pl_r), Sl_r, reduction='none') + F.kl_div( torch.log(Sl_r), Pl_r, 
reduction='none'), dim=1\n         )\n        return ret\n    def layer_association_discrepancy(self, Pl, Sl, x):\n        ad_vector = torch.concat(\n            [self.rowwise_kl(row, Pl, Sl).unsqueeze(1) for row in range(Pl.shape[1])], dim=1\n        )\n        return ad_vector\n\n    def association_discrepancy(self, P_list, S_list, x):\n        \n        ret = (1 / len(P_list)) * sum(\n            [\n                self.layer_association_discrepancy(P, S, x)\n                for P, S in zip(P_list, S_list)\n            ]\n        )\n        # ret: [batch,N]\n        return ret\n\n    def loss_function(self, x_hat, P_list, S_list, lambda_, x):\n        #P_list: [layers,batch,N,N]\n        #S_list: [layers,batch,N,N]\n        frob_norm = torch.linalg.matrix_norm(x_hat - x, ord=\"fro\")\n        ret = frob_norm - (\n            lambda_\n            * torch.linalg.norm(self.association_discrepancy(P_list, S_list, x),dim=1, ord=1)\n        )\n        return ret.mean()\n\n    def min_loss(self, x):\n        \n        P_list = self.P_layers\n        S_list = [S.detach() for S in self.S_layers]\n        # S_list = self.S_layers\n        lambda_ = -self.lambda_\n        return self.loss_function(self.output, P_list, S_list, lambda_, x)\n\n    def max_loss(self, x):\n        P_list = [P.detach() for P in self.P_layers]\n        # P_list = self.P_layers\n        S_list = self.S_layers\n        lambda_ = self.lambda_\n        return self.loss_function(self.output, P_list, S_list, lambda_, x)\n    \n    def anomaly_score_whole(self, x):\n        # x:[length,dim]\n        x = np.array(split_N_pad(x.reshape([-1,1]),self.N))\n        '''TODO:测试data_slice'''\n        data = torch.from_numpy(x)\n        if torch.cuda.is_available():\n            data = data.cuda()\n        dataset = TensorDataset(data)\n        dataloader = DataLoader(dataset, batch_size=min(self.batch_size, len(dataset)), shuffle=False, drop_last=False)\n        scores=[]\n        for step, batch in 
enumerate(dataloader):\n            batch=batch[0]\n            score = self.anomaly_score(batch)\n            scores.append(score)\n        return torch.cat(scores).flatten()\n            \n    \n\n    def anomaly_score(self, x):\n        # 原 x:[N,in_channel]\n        output = self.forward(x)\n        tmp = -self.association_discrepancy(self.P_layers, self.S_layers, x)\n        ad = F.softmax(\n            tmp, dim=0\n        )\n        assert ad.shape[1] == self.N\n\n        # norm = torch.tensor(\n        #     [\n        #         torch.linalg.norm(x[i, :] - self.output[i, :], ord=2)\n        #         for i in range(self.N)\n        #     ]\n        # )\n        norm = []\n        for idx in range(x.shape[0]):\n            tmp = torch.tensor(\n                [\n                    torch.linalg.norm(x[idx,i, :] - self.output[idx,i, :], ord=2)\n                    for i in range(self.N)\n                ]\n            )\n            norm.append(tmp)\n        norm = torch.cat(norm).reshape([-1,self.N])\n        assert norm.shape[1] == self.N\n        score = torch.mul(ad, norm)\n        return score\n"
  },
  {
    "path": "ts_anomaly_detection_methods/anomaly_transformer/datautils.py",
    "content": "import os\nimport numpy as np\nimport pandas as pd\nimport math\nimport random\nfrom datetime import datetime\nimport pickle\nfrom utils import pkl_load, pad_nan_to_target,data_slice,pad_zero_to_target\nfrom scipy.io.arff import loadarff\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\n\ndef load_UCR(dataset):\n    train_file = os.path.join('datasets/UCR', dataset, dataset + \"_TRAIN.tsv\")\n    test_file = os.path.join('datasets/UCR', dataset, dataset + \"_TEST.tsv\")\n    train_df = pd.read_csv(train_file, sep='\\t', header=None)\n    test_df = pd.read_csv(test_file, sep='\\t', header=None)\n    train_array = np.array(train_df)\n    test_array = np.array(test_df)\n\n    # Move the labels to {0, ..., L-1}\n    labels = np.unique(train_array[:, 0])\n    transform = {}\n    for i, l in enumerate(labels):\n        transform[l] = i\n\n    train = train_array[:, 1:].astype(np.float64)\n    train_labels = np.vectorize(transform.get)(train_array[:, 0])\n    test = test_array[:, 1:].astype(np.float64)\n    test_labels = np.vectorize(transform.get)(test_array[:, 0])\n\n    # Normalization for non-normalized datasets\n    # To keep the amplitude information, we do not normalize values over\n    # individual time series, but on the whole dataset\n    if dataset not in [\n        'AllGestureWiimoteX',\n        'AllGestureWiimoteY',\n        'AllGestureWiimoteZ',\n        'BME',\n        'Chinatown',\n        'Crop',\n        'EOGHorizontalSignal',\n        'EOGVerticalSignal',\n        'Fungi',\n        'GestureMidAirD1',\n        'GestureMidAirD2',\n        'GestureMidAirD3',\n        'GesturePebbleZ1',\n        'GesturePebbleZ2',\n        'GunPointAgeSpan',\n        'GunPointMaleVersusFemale',\n        'GunPointOldVersusYoung',\n        'HouseTwenty',\n        'InsectEPGRegularTrain',\n        'InsectEPGSmallTrain',\n        'MelbournePedestrian',\n        'PickupGestureWiimoteZ',\n        'PigAirwayPressure',\n        'PigArtPressure',\n     
   'PigCVP',\n        'PLAID',\n        'PowerCons',\n        'Rock',\n        'SemgHandGenderCh2',\n        'SemgHandMovementCh2',\n        'SemgHandSubjectCh2',\n        'ShakeGestureWiimoteZ',\n        'SmoothSubspace',\n        'UMD'\n    ]:\n        return train[..., np.newaxis], train_labels, test[..., np.newaxis], test_labels\n    \n    mean = np.nanmean(train)\n    std = np.nanstd(train)\n    train = (train - mean) / std\n    test = (test - mean) / std\n    return train[..., np.newaxis], train_labels, test[..., np.newaxis], test_labels\n\ndef load_anomaly(name):\n    res = pkl_load(f'datasets/{name}.pkl')\n    return res['all_train_data'], res['all_train_labels'], res['all_train_timestamps'], \\\n           res['all_test_data'],  res['all_test_labels'],  res['all_test_timestamps'], \\\n           res['delay']\n\n\ndef gen_ano_train_data(all_train_data):\n    maxl = np.max([ len(all_train_data[k]) for k in all_train_data ])\n    pretrain_data = []\n    for k in all_train_data:\n        train_data = pad_zero_to_target(all_train_data[k], maxl, axis=0)\n        pretrain_data.append(train_data)\n    pretrain_data = np.expand_dims(np.stack(pretrain_data), 2)\n    return pretrain_data\n\nif __name__ == '__main__':\n    dataset='yahoo'\n    all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay = load_anomaly(dataset)\n    train_data = gen_ano_train_data(all_train_data)\n    train_data_s = data_slice(train_data, 100)"
  },
  {
    "path": "ts_anomaly_detection_methods/anomaly_transformer/models/__init__.py",
    "content": "from .encoder import TSEncoder\n"
  },
  {
    "path": "ts_anomaly_detection_methods/anomaly_transformer/models/anomaly_transformer_model.py",
    "content": "import math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\n\nclass AnomalyAttention(nn.Module):\n    def __init__(self, N, d_model):\n        super(AnomalyAttention, self).__init__()\n        self.d_model = d_model\n        self.N = N\n\n        self.Wq = nn.Linear(d_model, d_model, bias=False)\n        self.Wk = nn.Linear(d_model, d_model, bias=False)\n        self.Wv = nn.Linear(d_model, d_model, bias=False)\n        self.Ws = nn.Linear(d_model, 1, bias=False)\n\n        self.Q = torch.zeros((N, d_model))\n        self.K = torch.zeros((N, d_model))\n        self.V = torch.zeros((N, d_model))\n        self.sigma = torch.zeros((N, 1))\n\n        self.P = torch.zeros((N, N))\n        self.S = torch.zeros((N, N))\n\n    def forward(self, x):\n        # x: N x d_model\n\n        self.initialize(x)\n        self.P = self.prior_association()\n        self.S = self.series_association()\n        Z = self.reconstruction() # N x d_model\n\n        return Z\n\n    def initialize(self, x):\n        self.Q = self.Wq(x)\n        self.K = self.Wk(x)\n        self.V = self.Wv(x)\n        self.sigma = self.Ws(x)\n\n    @staticmethod\n    def gaussian_kernel(mean, sigma):\n        normalize = 1 / (math.sqrt(2 * torch.pi) * sigma)\n        return normalize * torch.exp(-0.5 * (mean / sigma).pow(2))\n\n    def prior_association(self):\n        p = torch.from_numpy(\n            np.abs(np.indices((self.N, self.N))[0] - np.indices((self.N, self.N))[1])\n        )\n        gaussian = self.gaussian_kernel(p.float(), self.sigma)\n        gaussian /= gaussian.sum(dim=-1).view(-1, 1)\n\n        return gaussian\n\n    def series_association(self):\n        return F.softmax((self.Q @ self.K.T) / math.sqrt(self.d_model), dim=0)\n\n    def reconstruction(self):\n        return self.S @ self.V\n\n\nclass AnomalyTransformerBlock(nn.Module):\n    def __init__(self, N, d_model):\n        super().__init__()\n        self.N, self.d_model = N, 
d_model\n\n        self.attention = AnomalyAttention(self.N, self.d_model)\n        self.ln1 = nn.LayerNorm(self.d_model)\n        self.ff = nn.Sequential(nn.Linear(self.d_model, self.d_model), nn.ReLU())\n        self.ln2 = nn.LayerNorm(self.d_model)\n\n    def forward(self, x):\n        x_identity = x\n        x = self.attention(x)\n        z = self.ln1(x + x_identity)\n\n        z_identity = z\n        z = self.ff(z)\n        z = self.ln2(z + z_identity)\n\n        return z\n\n\nclass AnomalyTransformer(nn.Module):\n    def __init__(self, N, in_channel, d_model, layers, lambda_):\n        super().__init__()\n        self.in_channel = in_channel\n        self.N = N\n        self.d_model = d_model\n\n        self.input2hidden = nn.Linear(self.in_channel, self.d_model)\n        self.blocks = nn.ModuleList(\n            [AnomalyTransformerBlock(self.N, self.d_model) for _ in range(layers)]\n        )\n        self.output = None\n        self.lambda_ = lambda_\n\n        self.P_layers = []\n        self.S_layers = []\n\n    def forward(self, x):\n        x = self.input2hidden(x)\n\n        for idx, block in enumerate(self.blocks):\n            x = block(x)\n            self.P_layers.append(block.attention.P)\n            self.S_layers.append(block.attention.S)\n\n        self.output = x # N x d_model\n        return x \n\n    def layer_association_discrepancy(self, Pl, Sl, x):\n        rowwise_kl = lambda row: (\n            F.kl_div(Pl[row, :], Sl[row, :]) + F.kl_div(Sl[row, :], Pl[row, :])\n        )\n        ad_vector = torch.concat(\n            [rowwise_kl(row).unsqueeze(0) for row in range(Pl.shape[0])]\n        )\n        return ad_vector\n\n    def association_discrepancy(self, P_list, S_list, x):\n\n        return (1 / len(P_list)) * sum(\n            [\n                self.layer_association_discrepancy(P, S, x)\n                for P, S in zip(P_list, S_list)\n            ]\n        )\n\n    def loss_function(self, x_hat, P_list, S_list, lambda_, x):\n     
   frob_norm = torch.linalg.matrix_norm(x_hat - x, ord=\"fro\")\n        return frob_norm - (\n            lambda_\n            * torch.linalg.norm(self.association_discrepancy(P_list, S_list, x), ord=1)\n        )\n\n    def min_loss(self, x):\n        P_list = self.P_layers\n        S_list = [S.detach() for S in self.S_layers]\n        lambda_ = -self.lambda_\n        return self.loss_function(self.output, P_list, S_list, lambda_, x)\n\n    def max_loss(self, x):\n        P_list = [P.detach() for P in self.P_layers]\n        S_list = self.S_layers\n        lambda_ = self.lambda_\n        return self.loss_function(self.output, P_list, S_list, lambda_, x)\n\n    def anomaly_score(self, x):\n        ad = F.softmax(\n            -self.association_discrepancy(self.P_layers, self.S_layers, x), dim=0\n        )\n\n        assert ad.shape[0] == self.N\n\n        norm = torch.tensor(\n            [\n                torch.linalg.norm(x[i, :] - self.output[i, :], ord=2)\n                for i in range(self.N)\n            ]\n        )\n\n        assert norm.shape[0] == self.N\n\n        score = torch.mul(ad, norm)\n\n        return score"
  },
  {
    "path": "ts_anomaly_detection_methods/anomaly_transformer/models/dilated_conv.py",
    "content": "import torch\nfrom torch import nn\nimport torch.nn.functional as F\nimport numpy as np\n\nclass SamePadConv(nn.Module):\n    def __init__(self, in_channels, out_channels, kernel_size, dilation=1, groups=1):\n        super().__init__()\n        self.receptive_field = (kernel_size - 1) * dilation + 1\n        padding = self.receptive_field // 2\n        self.conv = nn.Conv1d(\n            in_channels, out_channels, kernel_size,\n            padding=padding,\n            dilation=dilation,\n            groups=groups\n        )\n        self.remove = 1 if self.receptive_field % 2 == 0 else 0\n        \n    def forward(self, x):\n        out = self.conv(x)\n        if self.remove > 0:\n            out = out[:, :, : -self.remove]\n        return out\n    \nclass ConvBlock(nn.Module):\n    def __init__(self, in_channels, out_channels, kernel_size, dilation, final=False):\n        super().__init__()\n        self.conv1 = SamePadConv(in_channels, out_channels, kernel_size, dilation=dilation)\n        self.conv2 = SamePadConv(out_channels, out_channels, kernel_size, dilation=dilation)\n        self.projector = nn.Conv1d(in_channels, out_channels, 1) if in_channels != out_channels or final else None\n    \n    def forward(self, x):\n        residual = x if self.projector is None else self.projector(x)\n        x = F.gelu(x)\n        x = self.conv1(x)\n        x = F.gelu(x)\n        x = self.conv2(x)\n        return x + residual\n\nclass DilatedConvEncoder(nn.Module):\n    def __init__(self, in_channels, channels, kernel_size):\n        super().__init__()\n        self.net = nn.Sequential(*[\n            ConvBlock(\n                channels[i-1] if i > 0 else in_channels,\n                channels[i],\n                kernel_size=kernel_size,\n                dilation=2**i,\n                final=(i == len(channels)-1)\n            )\n            for i in range(len(channels))\n        ])\n        \n    def forward(self, x):\n        return self.net(x)\n"
  },
  {
    "path": "ts_anomaly_detection_methods/anomaly_transformer/models/encoder.py",
    "content": "import torch\nfrom torch import nn\nimport torch.nn.functional as F\nimport numpy as np\nfrom .dilated_conv import DilatedConvEncoder\n\ndef generate_continuous_mask(B, T, n=5, l=0.1):\n    res = torch.full((B, T), True, dtype=torch.bool)\n    if isinstance(n, float):\n        n = int(n * T)\n    n = max(min(n, T // 2), 1)\n    \n    if isinstance(l, float):\n        l = int(l * T)\n    l = max(l, 1)\n    \n    for i in range(B):\n        for _ in range(n):\n            t = np.random.randint(T-l+1)\n            res[i, t:t+l] = False\n    return res\n\ndef generate_binomial_mask(B, T, p=0.5):\n    return torch.from_numpy(np.random.binomial(1, p, size=(B, T))).to(torch.bool)\n\nclass TSEncoder(nn.Module):\n    def __init__(self, input_dims, output_dims, hidden_dims=64, depth=10, mask_mode='binomial'):\n        super().__init__()\n        self.input_dims = input_dims\n        self.output_dims = output_dims\n        self.hidden_dims = hidden_dims\n        self.mask_mode = mask_mode\n        self.input_fc = nn.Linear(input_dims, hidden_dims)\n        self.feature_extractor = DilatedConvEncoder(\n            hidden_dims,\n            [hidden_dims] * depth + [output_dims],\n            kernel_size=3\n        )\n        self.repr_dropout = nn.Dropout(p=0.1)\n        \n    def forward(self, x, mask=None):  # x: B x T x input_dims\n        nan_mask = ~x.isnan().any(axis=-1)\n        x[~nan_mask] = 0\n        x = self.input_fc(x)  # B x T x Ch\n        \n        # generate & apply mask\n        if mask is None:\n            if self.training:\n                mask = self.mask_mode\n            else:\n                mask = 'all_true'\n        \n        if mask == 'binomial':\n            mask = generate_binomial_mask(x.size(0), x.size(1)).to(x.device)\n        elif mask == 'continuous':\n            mask = generate_continuous_mask(x.size(0), x.size(1)).to(x.device)\n        elif mask == 'all_true':\n            mask = x.new_full((x.size(0), x.size(1)), True, 
dtype=torch.bool)\n        elif mask == 'all_false':\n            mask = x.new_full((x.size(0), x.size(1)), False, dtype=torch.bool)\n        elif mask == 'mask_last':\n            mask = x.new_full((x.size(0), x.size(1)), True, dtype=torch.bool)\n            mask[:, -1] = False\n        \n        mask &= nan_mask\n        x[~mask] = 0\n        \n        # conv encoder\n        x = x.transpose(1, 2)  # B x Ch x T\n        x = self.repr_dropout(self.feature_extractor(x))  # B x Co x T\n        x = x.transpose(1, 2)  # B x T x Co\n        \n        return x\n        "
  },
  {
    "path": "ts_anomaly_detection_methods/anomaly_transformer/models/losses.py",
    "content": "import torch\nfrom torch import nn\nimport torch.nn.functional as F\n\ndef hierarchical_contrastive_loss(z1, z2, alpha=0.5, temporal_unit=0):\n    loss = torch.tensor(0., device=z1.device)\n    d = 0\n    while z1.size(1) > 1:\n        if alpha != 0:\n            loss += alpha * instance_contrastive_loss(z1, z2)\n        if d >= temporal_unit:\n            if 1 - alpha != 0:\n                loss += (1 - alpha) * temporal_contrastive_loss(z1, z2)\n        d += 1\n        z1 = F.max_pool1d(z1.transpose(1, 2), kernel_size=2).transpose(1, 2)\n        z2 = F.max_pool1d(z2.transpose(1, 2), kernel_size=2).transpose(1, 2)\n    if z1.size(1) == 1:\n        if alpha != 0:\n            loss += alpha * instance_contrastive_loss(z1, z2)\n        d += 1\n    return loss / d\n\ndef instance_contrastive_loss(z1, z2):\n    B, T = z1.size(0), z1.size(1)\n    if B == 1:\n        return z1.new_tensor(0.)\n    z = torch.cat([z1, z2], dim=0)  # 2B x T x C\n    z = z.transpose(0, 1)  # T x 2B x C\n    sim = torch.matmul(z, z.transpose(1, 2))  # T x 2B x 2B\n    logits = torch.tril(sim, diagonal=-1)[:, :, :-1]    # T x 2B x (2B-1)\n    logits += torch.triu(sim, diagonal=1)[:, :, 1:]\n    logits = -F.log_softmax(logits, dim=-1)\n    \n    i = torch.arange(B, device=z1.device)\n    loss = (logits[:, i, B + i - 1].mean() + logits[:, B + i, i].mean()) / 2\n    return loss\n\ndef temporal_contrastive_loss(z1, z2):\n    B, T = z1.size(0), z1.size(1)\n    if T == 1:\n        return z1.new_tensor(0.)\n    z = torch.cat([z1, z2], dim=1)  # B x 2T x C\n    sim = torch.matmul(z, z.transpose(1, 2))  # B x 2T x 2T\n    logits = torch.tril(sim, diagonal=-1)[:, :, :-1]    # B x 2T x (2T-1)\n    logits += torch.triu(sim, diagonal=1)[:, :, 1:]\n    logits = -F.log_softmax(logits, dim=-1)\n    \n    t = torch.arange(T, device=z1.device)\n    loss = (logits[:, t, T + t - 1].mean() + logits[:, T + t, t].mean()) / 2\n    return loss\n"
  },
  {
    "path": "ts_anomaly_detection_methods/anomaly_transformer/tasks/__init__.py",
    "content": "from .anomaly_detection import eval_anomaly_detection, eval_anomaly_detection_coldstart,np_shift,eval_ad_result\n"
  },
  {
    "path": "ts_anomaly_detection_methods/anomaly_transformer/tasks/anomaly_detection.py",
    "content": "import numpy as np\nimport time\nfrom sklearn.metrics import f1_score, precision_score, recall_score\nimport bottleneck as bn\nimport pdb\n\n# consider delay threshold and missing segments\ndef get_range_proba(predict, label, delay=7):\n    splits = np.where(label[1:] != label[:-1])[0] + 1\n    is_anomaly = label[0] == 1\n    new_predict = np.array(predict)\n    pos = 0\n\n    for sp in splits:\n        if is_anomaly:\n            if 1 in predict[pos:min(pos + delay + 1, sp)]:\n                new_predict[pos: sp] = 1\n            else:\n                new_predict[pos: sp] = 0\n        is_anomaly = not is_anomaly\n        pos = sp\n    sp = len(label)\n\n    if is_anomaly:  # anomaly in the end\n        if 1 in predict[pos: min(pos + delay + 1, sp)]:\n            new_predict[pos: sp] = 1\n        else:\n            new_predict[pos: sp] = 0\n\n    return new_predict\n\n\n# set missing = 0\ndef reconstruct_label(timestamp, label):\n    timestamp = np.asarray(timestamp, np.int64)\n    index = np.argsort(timestamp)\n\n    timestamp_sorted = np.asarray(timestamp[index])\n    interval = np.min(np.diff(timestamp_sorted))\n\n    label = np.asarray(label, np.int64)\n    label = np.asarray(label[index])\n\n    idx = (timestamp_sorted - timestamp_sorted[0]) // interval\n\n    new_label = np.zeros(shape=((timestamp_sorted[-1] - timestamp_sorted[0]) // interval + 1,), dtype=np.int)\n    new_label[idx] = label\n\n    return new_label\n\n\ndef eval_ad_result(test_pred_list, test_labels_list, test_timestamps_list, delay):\n    labels = []\n    pred = []\n    for test_pred, test_labels, test_timestamps in zip(test_pred_list, test_labels_list, test_timestamps_list):\n        assert test_pred.shape == test_labels.shape == test_timestamps.shape\n        test_labels = reconstruct_label(test_timestamps, test_labels)\n        test_pred = reconstruct_label(test_timestamps, test_pred)\n        test_pred = get_range_proba(test_pred, test_labels, delay)\n        
labels.append(test_labels)\n        pred.append(test_pred)\n    labels = np.concatenate(labels)\n    pred = np.concatenate(pred)\n    return {\n        'f1': f1_score(labels, pred),\n        'precision': precision_score(labels, pred),\n        'recall': recall_score(labels, pred)\n    }\n\n\ndef np_shift(arr, num, fill_value=np.nan):\n    result = np.empty_like(arr)\n    if num > 0:\n        result[:num] = fill_value\n        result[num:] = arr[:-num]\n    elif num < 0:\n        result[num:] = fill_value\n        result[:num] = arr[-num:]\n    else:\n        result[:] = arr\n    return result\n\n\ndef eval_anomaly_detection(model, all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay):\n    t = time.time()\n    \n    all_train_repr = {}\n    all_test_repr = {}\n    all_train_repr_wom = {}\n    all_test_repr_wom = {}\n    for k in all_train_data:\n        print(k)\n        train_data = all_train_data[k]\n        test_data = all_test_data[k]\n\n        full_repr = model.encode(\n            np.concatenate([train_data, test_data]).reshape(1, -1, 1),\n            mask='mask_last',\n            casual=True,\n            sliding_length=1,\n            sliding_padding=200,\n            batch_size=256\n        ).squeeze()\n        all_train_repr[k] = full_repr[:len(train_data)] # (n_timestamps, repr-dims)\n        all_test_repr[k] = full_repr[len(train_data):] # (n_timestamps, repr-dims)\n\n        full_repr_wom = model.encode(\n            np.concatenate([train_data, test_data]).reshape(1, -1, 1),\n            casual=True,\n            sliding_length=1,\n            sliding_padding=200,\n            batch_size=256\n        ).squeeze()\n        all_train_repr_wom[k] = full_repr_wom[:len(train_data)] # (n_timestamps, repr-dims)\n        all_test_repr_wom[k] = full_repr_wom[len(train_data):] # (n_timestamps, repr-dims)\n\n        # print(np.shape(all_train_repr[k]))\n        # print(np.shape(all_test_repr[k]))\n    
    # print(np.shape(all_train_repr_wom[k]))\n        # print(np.shape(all_test_repr_wom[k]))\n        # print(\"#####################\")\n        # raise Exception('my personal exception!')\n        \n    pdb.set_trace()\n    res_log = []\n    labels_log = []\n    timestamps_log = []\n    for k in all_train_data:\n        train_data = all_train_data[k]\n        train_labels = all_train_labels[k]\n        train_timestamps = all_train_timestamps[k]\n\n        test_data = all_test_data[k]\n        test_labels = all_test_labels[k]\n        test_timestamps = all_test_timestamps[k]\n\n        train_err = np.abs(all_train_repr_wom[k] - all_train_repr[k]).sum(axis=1)\n        test_err = np.abs(all_test_repr_wom[k] - all_test_repr[k]).sum(axis=1)\n\n        ma = np_shift(bn.move_mean(np.concatenate([train_err, test_err]), 21), 1)\n        train_err_adj = (train_err - ma[:len(train_err)]) / ma[:len(train_err)]\n        test_err_adj = (test_err - ma[len(train_err):]) / ma[len(train_err):]\n        train_err_adj = train_err_adj[22:]\n\n        thr = np.mean(train_err_adj) + 4 * np.std(train_err_adj)\n        test_res = (test_err_adj > thr) * 1\n\n        for i in range(len(test_res)):\n            if i >= delay and test_res[i-delay:i].sum() >= 1:\n                test_res[i] = 0\n\n        res_log.append(test_res)\n        labels_log.append(test_labels)\n        timestamps_log.append(test_timestamps)\n    t = time.time() - t\n    pdb.set_trace()\n    eval_res = eval_ad_result(res_log, labels_log, timestamps_log, delay)\n    eval_res['infer_time'] = t\n    return res_log, eval_res\n\n\ndef eval_anomaly_detection_coldstart(model, all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay):\n    t = time.time()\n    \n    all_data = {}\n    all_repr = {}\n    all_repr_wom = {}\n    for k in all_train_data:\n        all_data[k] = np.concatenate([all_train_data[k], all_test_data[k]])\n        all_repr[k] = model.encode(\n     
       all_data[k].reshape(1, -1, 1),\n            mask='mask_last',\n            casual=True,\n            sliding_length=1,\n            sliding_padding=200,\n            batch_size=256\n        ).squeeze()\n        all_repr_wom[k] = model.encode(\n            all_data[k].reshape(1, -1, 1),\n            casual=True,\n            sliding_length=1,\n            sliding_padding=200,\n            batch_size=256\n        ).squeeze()\n        \n    res_log = []\n    labels_log = []\n    timestamps_log = []\n    for k in all_data:\n        data = all_data[k]\n        labels = np.concatenate([all_train_labels[k], all_test_labels[k]])\n        timestamps = np.concatenate([all_train_timestamps[k], all_test_timestamps[k]])\n        \n        err = np.abs(all_repr_wom[k] - all_repr[k]).sum(axis=1)\n        ma = np_shift(bn.move_mean(err, 21), 1)\n        err_adj = (err - ma) / ma\n        \n        MIN_WINDOW = len(data) // 10\n        thr = bn.move_mean(err_adj, len(err_adj), MIN_WINDOW) + 4 * bn.move_std(err_adj, len(err_adj), MIN_WINDOW)\n        res = (err_adj > thr) * 1\n        \n        for i in range(len(res)):\n            if i >= delay and res[i-delay:i].sum() >= 1:\n                res[i] = 0\n\n        res_log.append(res[MIN_WINDOW:])\n        labels_log.append(labels[MIN_WINDOW:])\n        timestamps_log.append(timestamps[MIN_WINDOW:])\n    t = time.time() - t\n    \n    eval_res = eval_ad_result(res_log, labels_log, timestamps_log, delay)\n    eval_res['infer_time'] = t\n    return res_log, eval_res\n\n"
  },
  {
    "path": "ts_anomaly_detection_methods/anomaly_transformer/train.py",
    "content": "import torch\nimport numpy as np\nimport argparse\nimport os\nimport sys\nimport time\nimport datetime\nfrom ts2vec import TS2Vec\nimport tasks\nimport pdb\nimport datautils\nfrom utils import init_dl_program, name_with_datetime, pkl_save, data_dropout\n\ndef save_checkpoint_callback(\n    save_every=1,\n    unit='epoch'\n):\n    assert unit in ('epoch', 'iter')\n    def callback(model, loss):\n        n = model.n_epochs if unit == 'epoch' else model.n_iters\n        if n % save_every == 0:\n            model.save(f'{run_dir}/model_{n}.pkl')\n    return callback\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('dataset', help='The dataset name')\n    parser.add_argument('run_name', help='The folder name used to save model, output and evaluation metrics. This can be set to any word')\n    parser.add_argument('--loader', type=str, required=False, default= 'anomaly', help='The data loader used to load the experimental data. This can be set to anomaly or anomaly_coldstart')\n    parser.add_argument('--gpu', type=int, default=0, help='The gpu no. 
used for training and inference (defaults to 0)')\n    parser.add_argument('--batch-size', type=int, default=8, help='The batch size (defaults to 8)')\n    parser.add_argument('--lr', type=float, default=0.001, help='The learning rate (defaults to 0.001)')\n    parser.add_argument('--repr-dims', type=int, default=320, help='The representation dimension (defaults to 320)')\n    parser.add_argument('--max-train-length', type=int, default=3000, help='For sequence with a length greater than <max_train_length>, it would be cropped into some sequences, each of which has a length less than <max_train_length> (defaults to 3000)')\n    parser.add_argument('--iters', type=int, default=10, help='The number of iterations')\n    parser.add_argument('--epochs', type=int, default=100, help='The number of epochs')\n    parser.add_argument('--save-every', type=int, default=1, help='Save the checkpoint every <save_every> iterations/epochs')\n    parser.add_argument('--seed', type=int, default=123, help='The random seed')\n    parser.add_argument('--max-threads', type=int, default=4, help='The maximum allowed number of threads used by this process')\n    parser.add_argument('--eval', type=bool, default=False, help='Whether to perform evaluation after training')\n    parser.add_argument('--irregular', type=float, default=0, help='The ratio of missing observations (defaults to 0)')\n    args = parser.parse_args()\n    \n    print(\"Dataset:\", args.dataset)\n    print(\"Arguments:\", str(args))\n    \n    device = init_dl_program(args.gpu, seed=args.seed, max_threads=args.max_threads)\n    \n    print('Loading data... 
', end='')\n    if args.loader == 'anomaly':\n        task_type = 'anomaly_detection'\n        all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay = datautils.load_anomaly(args.dataset)\n        train_data = datautils.gen_ano_train_data(all_train_data)\n        \n    elif args.loader == 'anomaly_coldstart':\n        task_type = 'anomaly_detection_coldstart'\n        all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay = datautils.load_anomaly(args.dataset)\n        train_data, _, _, _ = datautils.load_UCR('FordA')\n        \n    else:\n        raise ValueError(f\"Unknown loader {args.loader}.\")\n        \n        \n    if args.irregular > 0:\n        raise ValueError(f\"Task type {task_type} is not supported when irregular>0.\")\n    print('done')\n    \n    config = dict(\n        batch_size=args.batch_size,\n        lr=args.lr,\n        output_dims=args.repr_dims,\n        max_train_length=args.max_train_length\n    )\n    \n    if args.save_every is not None:\n        unit = 'epoch' if args.epochs is not None else 'iter'\n        config[f'after_{unit}_callback'] = save_checkpoint_callback(args.save_every, unit)\n\n    run_dir = 'training/' + args.dataset + '__' + name_with_datetime(args.run_name)\n    os.makedirs(run_dir, exist_ok=True)\n    \n    t = time.time()\n    \n    model = TS2Vec(\n        input_dims=train_data.shape[-1],\n        device=device,\n        **config\n    )\n    # loss_log = model.fit(\n    #     train_data,\n    #     n_epochs=args.epochs,\n    #     n_iters=args.iters,\n    #     verbose=True\n    # )\n    model.save(f'{run_dir}/model.pkl')\n\n    t = time.time() - t\n    print(f\"\\nTraining time: {datetime.timedelta(seconds=t)}\")\n    print(\"Training time(seconds): \", t)\n\n    if args.eval:\n        if task_type == 'anomaly_detection':\n            out, eval_res = tasks.eval_anomaly_detection(model, 
all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay)\n        elif task_type == 'anomaly_detection_coldstart':\n            out, eval_res = tasks.eval_anomaly_detection_coldstart(model, all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay)\n        else:\n            assert False\n        pkl_save(f'{run_dir}/out.pkl', out)\n        pkl_save(f'{run_dir}/eval_res.pkl', eval_res)\n        print('Evaluation result:', eval_res)\n\n    print(\"Finished.\")\n"
  },
  {
    "path": "ts_anomaly_detection_methods/anomaly_transformer/trainATbatch.py",
    "content": "import logging\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader, TensorDataset, SequentialSampler\nfrom utils import data_slice\nimport datautils\nimport pdb\nfrom transformers.optimization import AdamW, get_cosine_schedule_with_warmup\nfrom sklearn.metrics import f1_score\nimport tasks\nfrom ATmodelbatch import AnomalyTransformer\nimport time\nimport bottleneck as bn\nimport argparse\nimport os\nimport pickle\n\nif torch.cuda.is_available():\n    torch.set_default_tensor_type('torch.cuda.DoubleTensor')\nelse:\n    torch.set_default_tensor_type('torch.DoubleTensor')\n\nlogger = logging.getLogger(__name__)\n\n\nclass Config:\n\t\n    window_size=100\n    shuffle=True\n    epochs=500\n    warmup_ratio= 0.1\n    lr= 10e-4\n    adam_epsilon= 1e-6\n    batch_size = 512\n    \n    in_channel=1\n    dataset_name = \"kpi\"\n    d_model=512\n    layers=3\n    lambda_=3\n    \n    save_dir = './save_models'\n    save_every_epoch = 2\n    \n    is_train=True\n    is_eval=True\n\n\ndef train(config,model,all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay):\n    \n    train_data = datautils.gen_ano_train_data(all_train_data)\n    config.in_channel = train_data.shape[-1]\n    train_data = data_slice(train_data,config.window_size)\n    train_data = torch.from_numpy(train_data)\n    \n    if torch.cuda.is_available():\n        train_data = train_data.cuda()\n        \n    train_dataset = TensorDataset(train_data)\n    train_dataloader = DataLoader(train_dataset, batch_size=min(config.batch_size, len(train_dataset)), shuffle=config.shuffle, drop_last=True,generator=torch.Generator(device='cuda:0'))\n\n    total_steps = int(len(train_dataloader) * config.epochs)\n    warmup_steps = max(int(total_steps * config.warmup_ratio), 200)\n    optimizer = AdamW(\n        model.parameters(),\n        lr=config.lr,\n        eps=config.adam_epsilon,\n    )\n    scheduler = 
get_cosine_schedule_with_warmup(\n        optimizer, num_warmup_steps=warmup_steps, num_training_steps=total_steps\n    )\n    print(\"Total steps: {}\".format(total_steps))\n    print(\"Warmup steps: {}\".format(warmup_steps))\n\n\n    for epoch in range(int(config.epochs)):\n        print(epoch)\n        if (epoch+1) % config.save_every_epoch == 0:\n            path = config.save_dir+'/'+model.to_string()+'_epoch:%d' % (epoch+1)\n            os.makedirs(path,exist_ok=True)\n            torch.save(model,path+'/model.pt')\n            pdb.set_trace()\n            f1,pre,recall = evaluate(config,epoch+1,model,all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay)\n            print('epoch:%d\\tf1:%f\\tp:%f\\tr:%f' % (epoch+1,f1,pre,recall))\n            \n        model.zero_grad()\n        for step, batch in enumerate(train_dataloader):\n            batch=batch[0]\n            model(batch)\n            min_loss = model.min_loss(batch)\n            max_loss = model.max_loss(batch)\n            #print('minloss:%f\\tmaxloss:%f' % (min_loss.detach().cpu(),max_loss.detach().cpu()))\n            optimizer.zero_grad()\n            min_loss.backward(retain_graph=True)\n            max_loss.backward()\n            optimizer.step()\n            scheduler.step()\n            \n\ndef evaluate(config,cur_epoch,model,all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay):\n    res_log = []\n    labels_log = []\n    timestamps_log = []\n    t = time.time()\n    for k in all_train_data:\n        train_data = all_train_data[k]\n        train_labels = all_train_labels[k]\n        train_timestamps = all_train_timestamps[k]\n        train_length = train_labels.shape[0]\n\n        test_data = all_test_data[k]\n        test_labels = all_test_labels[k]\n        test_timestamps = all_test_timestamps[k]\n        test_length = test_labels.shape[0]\n        \n        
train_err = model.anomaly_score_whole(train_data).detach().cpu().numpy()\n        test_err = model.anomaly_score_whole(test_data).detach().cpu().numpy()\n        \n        train_err = train_err[:train_length]\n        test_err = test_err[:test_length]\n        \n        ma = tasks.np_shift(bn.move_mean(np.concatenate([train_err, test_err]), 21), 1)\n        train_err_adj = (train_err - ma[:len(train_err)]) / ma[:len(train_err)]\n        test_err_adj = (test_err - ma[len(train_err):]) / ma[len(train_err):]\n        train_err_adj = train_err_adj[22:]\n\n        thr = np.mean(train_err_adj) + 4 * np.std(train_err_adj)\n        test_res = (test_err_adj > thr) * 1\n\n        for i in range(len(test_res)):\n            if i >= delay and test_res[i-delay:i].sum() >= 1:\n                test_res[i] = 0\n        res_log.append(test_res)\n        labels_log.append(test_labels)\n        timestamps_log.append(test_timestamps)\n        \n    t = time.time() - t\n    eval_res = tasks.eval_ad_result(res_log, labels_log, timestamps_log, delay)\n    eval_res['infer_time'] = t\n    '''\n    eval_res:{'f1':,'p':,'r':,}\n    '''\n    '''save_results'''\n    path = config.save_dir+'/'+model.to_string()+'_epoch:%d' % (cur_epoch)\n    os.makedirs(path,exist_ok=True)\n    with open(path+'/res_log.pkl','wb') as f:\n        pickle.dump(res_log,f)\n    with open(path+'/eval_res.pkl','wb') as f:\n        pickle.dump(eval_res,f)\n    with open(path+'/results.txt','w') as f:\n        f.write('f1:%f\\tp:%f\\tr:%f\\n' % (eval_res['f1'],eval_res['precision'],eval_res['recall']))\n            \n    return eval_res['f1'],eval_res['precision'],eval_res['recall']\n\ndef main(config):\n    \n    all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay = datautils.load_anomaly(config.dataset_name)\n\n    \n    print('data loaded!')\n    model = 
AnomalyTransformer(config.batch_size,config.window_size,config.in_channel,config.d_model,config.layers,config.lambda_)\n    print('model builded!')\n    print('train start!')\n    if config.is_train:\n        model.train()\n        train(config,model,all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay)\n        '''save_trained_model'''\n        path = config.save_dir+'/'+model.to_string()+'_epoch:%d' % (config.epochs)\n        os.makedirs(path,exist_ok=True)\n        torch.save(model,path+'/model.pt')\n    \n    print('train finished! evaluating...')\n    if config.is_eval:\n        model.eval()\n        res_log,eval_res = evaluate(config,config.epochs,model,all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay)\n\n        \n    print('evaluate finished!')\n    \nif __name__ == \"__main__\":\n    config = Config()\n    main(config)\n"
  },
  {
    "path": "ts_anomaly_detection_methods/anomaly_transformer/ts2vec.py",
    "content": "import torch\nimport torch.nn.functional as F\nfrom torch.utils.data import TensorDataset, DataLoader\nimport numpy as np\nfrom models import TSEncoder\nfrom models.losses import hierarchical_contrastive_loss\nfrom utils import take_per_row, split_with_nan, centerize_vary_length_series, torch_pad_nan\nimport math\nimport pdb\n\nclass TS2Vec:\n    '''The TS2Vec model'''\n    \n    def __init__(\n        self,\n        input_dims,\n        output_dims=320,\n        hidden_dims=64,\n        depth=10,\n        device='cuda',\n        lr=0.001,\n        batch_size=16,\n        max_train_length=None,\n        temporal_unit=0,\n        after_iter_callback=None,\n        after_epoch_callback=None\n    ):\n        ''' Initialize a TS2Vec model.\n        \n        Args:\n            input_dims (int): The input dimension. For a univariate time series, this should be set to 1.\n            output_dims (int): The representation dimension.\n            hidden_dims (int): The hidden dimension of the encoder.\n            depth (int): The number of hidden residual blocks in the encoder.\n            device (int): The gpu used for training and inference.\n            lr (int): The learning rate.\n            batch_size (int): The batch size.\n            max_train_length (Union[int, NoneType]): The maximum allowed sequence length for training. For sequence with a length greater than <max_train_length>, it would be cropped into some sequences, each of which has a length less than <max_train_length>.\n            temporal_unit (int): The minimum unit to perform temporal contrast. 
When training on a very long sequence, this param helps to reduce the cost of time and memory.\n            after_iter_callback (Union[Callable, NoneType]): A callback function that would be called after each iteration.\n            after_epoch_callback (Union[Callable, NoneType]): A callback function that would be called after each epoch.\n        '''\n        \n        super().__init__()\n        self.device = device\n        self.lr = lr\n        self.batch_size = batch_size\n        self.max_train_length = max_train_length\n        self.temporal_unit = temporal_unit\n        \n        self._net = TSEncoder(input_dims=input_dims, output_dims=output_dims, hidden_dims=hidden_dims, depth=depth).to(self.device)\n        self.net = torch.optim.swa_utils.AveragedModel(self._net)\n        self.net.update_parameters(self._net)\n        \n        self.after_iter_callback = after_iter_callback\n        self.after_epoch_callback = after_epoch_callback\n        \n        self.n_epochs = 0\n        self.n_iters = 0\n    \n    def fit(self, train_data, n_epochs=None, n_iters=None, verbose=False):\n        ''' Training the TS2Vec model.\n        \n        Args:\n            train_data (numpy.ndarray): The training data. It should have a shape of (n_instance, n_timestamps, n_features). All missing data should be set to NaN.\n            n_epochs (Union[int, NoneType]): The number of epochs. When this reaches, the training stops.\n            n_iters (Union[int, NoneType]): The number of iterations. When this reaches, the training stops. 
If both n_epochs and n_iters are not specified, a default setting would be used that sets n_iters to 200 for a dataset with size <= 100000, 600 otherwise.\n            verbose (bool): Whether to print the training loss after each epoch.\n            \n        Returns:\n            loss_log: a list containing the training losses on each epoch.\n        '''\n        assert train_data.ndim == 3\n        pdb.set_trace()\n        \n        if n_iters is None and n_epochs is None:\n            n_iters = 200 if train_data.size <= 100000 else 600  # default param for n_iters\n        \n        if self.max_train_length is not None:\n            sections = train_data.shape[1] // self.max_train_length\n            if sections >= 2:\n                train_data = np.concatenate(split_with_nan(train_data, sections, axis=1), axis=0)\n\n        temporal_missing = np.isnan(train_data).all(axis=-1).any(axis=0)\n        if temporal_missing[0] or temporal_missing[-1]:\n            train_data = centerize_vary_length_series(train_data)\n                \n        train_data = train_data[~np.isnan(train_data).all(axis=2).all(axis=1)]\n        \n        train_dataset = TensorDataset(torch.from_numpy(train_data).to(torch.float))\n        train_loader = DataLoader(train_dataset, batch_size=min(self.batch_size, len(train_dataset)), shuffle=True, drop_last=True)\n        \n        optimizer = torch.optim.AdamW(self._net.parameters(), lr=self.lr)\n        \n        loss_log = []\n        \n        while True:\n            if n_epochs is not None and self.n_epochs >= n_epochs:\n                break\n            \n            cum_loss = 0\n            n_epoch_iters = 0\n            \n            interrupted = False\n            for batch in train_loader:\n                if n_iters is not None and self.n_iters >= n_iters:\n                    interrupted = True\n                    break\n                \n                x = batch[0]  #(batch_size, n_timestamps, n_features)\n                # 
print(\"#####################\")\n                # raise Exception('my personal exception!')\n\n                if self.max_train_length is not None and x.size(1) > self.max_train_length:\n                    window_offset = np.random.randint(x.size(1) - self.max_train_length + 1)\n                    x = x[:, window_offset : window_offset + self.max_train_length]\n                x = x.to(self.device)\n                \n                ts_l = x.size(1)\n                crop_l = np.random.randint(low=2 ** (self.temporal_unit + 1), high=ts_l+1)\n                crop_left = np.random.randint(ts_l - crop_l + 1)\n                crop_right = crop_left + crop_l\n                crop_eleft = np.random.randint(crop_left + 1)\n                crop_eright = np.random.randint(low=crop_right, high=ts_l + 1)\n                crop_offset = np.random.randint(low=-crop_eleft, high=ts_l - crop_eright + 1, size=x.size(0))\n                \n                optimizer.zero_grad()\n                \n                out1 = self._net(take_per_row(x, crop_offset + crop_eleft, crop_right - crop_eleft)) \n                out1 = out1[:, -crop_l:]\n                \n                out2 = self._net(take_per_row(x, crop_offset + crop_left, crop_eright - crop_left))\n                out2 = out2[:, :crop_l]\n                \n                loss = hierarchical_contrastive_loss(\n                    out1,\n                    out2,\n                    temporal_unit=self.temporal_unit\n                )\n                \n                loss.backward()\n                optimizer.step()\n                self.net.update_parameters(self._net)\n                    \n                cum_loss += loss.item()\n                n_epoch_iters += 1\n                \n                self.n_iters += 1\n                \n                if self.after_iter_callback is not None:\n                    self.after_iter_callback(self, loss.item())\n            \n            if interrupted:\n                
break\n            \n            cum_loss /= n_epoch_iters\n            loss_log.append(cum_loss)\n            if verbose:\n                print(f\"Epoch #{self.n_epochs}: loss={cum_loss}\")\n            self.n_epochs += 1\n            \n            if self.after_epoch_callback is not None:\n                self.after_epoch_callback(self, cum_loss)\n            \n        return loss_log\n    \n    def _eval_with_pooling(self, x, mask=None, slicing=None, encoding_window=None):\n        out = self.net(x.to(self.device, non_blocking=True), mask)\n        if encoding_window == 'full_series':\n            if slicing is not None:\n                out = out[:, slicing]\n            out = F.max_pool1d(\n                out.transpose(1, 2),\n                kernel_size = out.size(1),\n            ).transpose(1, 2)\n            \n        elif isinstance(encoding_window, int):\n            out = F.max_pool1d(\n                out.transpose(1, 2),\n                kernel_size = encoding_window,\n                stride = 1,\n                padding = encoding_window // 2\n            ).transpose(1, 2)\n            if encoding_window % 2 == 0:\n                out = out[:, :-1]\n            if slicing is not None:\n                out = out[:, slicing]\n            \n        elif encoding_window == 'multiscale':\n            p = 0\n            reprs = []\n            while (1 << p) + 1 < out.size(1):\n                t_out = F.max_pool1d(\n                    out.transpose(1, 2),\n                    kernel_size = (1 << (p + 1)) + 1,\n                    stride = 1,\n                    padding = 1 << p\n                ).transpose(1, 2)\n                if slicing is not None:\n                    t_out = t_out[:, slicing]\n                reprs.append(t_out)\n                p += 1\n            out = torch.cat(reprs, dim=-1)\n            \n        else:\n            if slicing is not None:\n                out = out[:, slicing]\n            \n        return out.cpu()\n    \n  
  def encode(self, data, mask=None, encoding_window=None, casual=False, sliding_length=None, sliding_padding=0, batch_size=None):\n        ''' Compute representations using the model.\n        \n        Args:\n            data (numpy.ndarray): This should have a shape of (n_instance, n_timestamps, n_features). All missing data should be set to NaN.\n            mask (str): The mask used by encoder can be specified with this parameter. This can be set to 'binomial', 'continuous', 'all_true', 'all_false' or 'mask_last'.\n            encoding_window (Union[str, int]): When this param is specified, the computed representation would the max pooling over this window. This can be set to 'full_series', 'multiscale' or an integer specifying the pooling kernel size.\n            casual (bool): When this param is set to True, the future informations would not be encoded into representation of each timestamp.\n            sliding_length (Union[int, NoneType]): The length of sliding window. When this param is specified, a sliding inference would be applied on the time series.\n            sliding_padding (int): This param specifies the contextual data length used for inference every sliding windows.\n            batch_size (Union[int, NoneType]): The batch size used for inference. 
If not specified, this would be the same batch size as training.\n            \n        Returns:\n            repr: The representations for data.\n        '''\n        assert self.net is not None, 'please train or load a net first'\n        assert data.ndim == 3\n        if batch_size is None:\n            batch_size = self.batch_size\n        n_samples, ts_l, _ = data.shape\n\n        org_training = self.net.training\n        self.net.eval()\n        \n        dataset = TensorDataset(torch.from_numpy(data).to(torch.float))\n        loader = DataLoader(dataset, batch_size=batch_size)\n        \n        with torch.no_grad():\n            output = []\n            for batch in loader:\n                x = batch[0]\n                if sliding_length is not None:\n                    reprs = []\n                    if n_samples < batch_size:\n                        calc_buffer = []\n                        calc_buffer_l = 0\n                    for i in range(0, ts_l, sliding_length):\n                        l = i - sliding_padding\n                        r = i + sliding_length + (sliding_padding if not casual else 0)\n                        x_sliding = torch_pad_nan(\n                            x[:, max(l, 0) : min(r, ts_l)],\n                            left=-l if l<0 else 0,\n                            right=r-ts_l if r>ts_l else 0,\n                            dim=1\n                        )\n                        if n_samples < batch_size:\n                            if calc_buffer_l + n_samples > batch_size:\n                                out = self._eval_with_pooling(\n                                    torch.cat(calc_buffer, dim=0),\n                                    mask,\n                                    slicing=slice(sliding_padding, sliding_padding+sliding_length),\n                                    encoding_window=encoding_window\n                                )\n                                reprs += torch.split(out, n_samples)\n    
                            calc_buffer = []\n                                calc_buffer_l = 0\n                            calc_buffer.append(x_sliding)\n                            calc_buffer_l += n_samples\n                        else:\n                            out = self._eval_with_pooling(\n                                x_sliding,\n                                mask,\n                                slicing=slice(sliding_padding, sliding_padding+sliding_length),\n                                encoding_window=encoding_window\n                            )\n                            reprs.append(out)\n\n                    if n_samples < batch_size:\n                        if calc_buffer_l > 0:\n                            out = self._eval_with_pooling(\n                                torch.cat(calc_buffer, dim=0),\n                                mask,\n                                slicing=slice(sliding_padding, sliding_padding+sliding_length),\n                                encoding_window=encoding_window\n                            )\n                            reprs += torch.split(out, n_samples)\n                            calc_buffer = []\n                            calc_buffer_l = 0\n                    \n                    out = torch.cat(reprs, dim=1)\n                    if encoding_window == 'full_series':\n                        out = F.max_pool1d(\n                            out.transpose(1, 2).contiguous(),\n                            kernel_size = out.size(1),\n                        ).squeeze(1)\n                else:\n                    out = self._eval_with_pooling(x, mask, encoding_window=encoding_window)\n                    if encoding_window == 'full_series':\n                        out = out.squeeze(1)\n                        \n                output.append(out)\n                \n            output = torch.cat(output, dim=0)\n            \n        self.net.train(org_training)\n        return 
output.numpy()\n    \n    def save(self, fn):\n        ''' Save the model to a file.\n        \n        Args:\n            fn (str): filename.\n        '''\n        torch.save(self.net.state_dict(), fn)\n    \n    def load(self, fn):\n        ''' Load the model from a file.\n        \n        Args:\n            fn (str): filename.\n        '''\n        state_dict = torch.load(fn, map_location=self.device)\n        self.net.load_state_dict(state_dict)\n    \n"
  },
  {
    "path": "ts_anomaly_detection_methods/anomaly_transformer/utils.py",
    "content": "import os\nimport numpy as np\nimport pickle\nimport torch\nimport random\nfrom datetime import datetime\n\ndef pkl_save(name, var):\n    with open(name, 'wb') as f:\n        pickle.dump(var, f)\n\ndef pkl_load(name):\n    with open(name, 'rb') as f:\n        return pickle.load(f)\n\ndef split_N_pad(series,window_size):\n    assert len(series.shape)==2\n    ret=[]\n    l=series.shape[0]\n    for i in range(l//window_size):\n        ret.append(series[i*window_size:(i+1)*window_size,:])\n    left = l-l//window_size*window_size\n    '''TODO:pad'''\n    if left!=0:\n        p = np.zeros([window_size,series.shape[1]])\n        p[:left,:]=series[-left:,:]\n        ret.append(p)\n    return ret\n    \n    \n'''for AT'''\ndef data_slice(data,window_size):\n    '''\n    data : [size,length,dim]\n    '''\n    assert len(data.shape)==3\n    ret=[]\n    for i in range(data.shape[0]):\n        series = data[i]\n        ret.extend(split_N_pad(series,window_size))\n    return np.array(ret)\n        \n        \n    \ndef torch_pad_nan(arr, left=0, right=0, dim=0):\n    if left > 0:\n        padshape = list(arr.shape)\n        padshape[dim] = left\n        arr = torch.cat((torch.full(padshape, np.nan), arr), dim=dim)\n    if right > 0:\n        padshape = list(arr.shape)\n        padshape[dim] = right\n        arr = torch.cat((arr, torch.full(padshape, np.nan)), dim=dim)\n    return arr\n    \ndef pad_nan_to_target(array, target_length, axis=0, both_side=False):\n    assert array.dtype in [np.float16, np.float32, np.float64]\n    pad_size = target_length - array.shape[axis]\n    if pad_size <= 0:\n        return array\n    npad = [(0, 0)] * array.ndim\n    if both_side:\n        npad[axis] = (pad_size // 2, pad_size - pad_size//2)\n    else:\n        npad[axis] = (0, pad_size)\n    return np.pad(array, pad_width=npad, mode='constant', constant_values=np.nan)\n\ndef pad_zero_to_target(array, target_length, axis=0, both_side=False):\n    assert array.dtype in 
[np.float16, np.float32, np.float64]\n    pad_size = target_length - array.shape[axis]\n    if pad_size <= 0:\n        return array\n    npad = [(0, 0)] * array.ndim\n    if both_side:\n        npad[axis] = (pad_size // 2, pad_size - pad_size//2)\n    else:\n        npad[axis] = (0, pad_size)\n    return np.pad(array, pad_width=npad, mode='constant', constant_values=0)\n\ndef split_with_nan(x, sections, axis=0):\n    assert x.dtype in [np.float16, np.float32, np.float64]\n    arrs = np.array_split(x, sections, axis=axis)\n    target_length = arrs[0].shape[axis]\n    for i in range(len(arrs)):\n        arrs[i] = pad_nan_to_target(arrs[i], target_length, axis=axis)\n    return arrs\n\ndef take_per_row(A, indx, num_elem):\n    all_indx = indx[:,None] + np.arange(num_elem)\n    return A[torch.arange(all_indx.shape[0])[:,None], all_indx]\n\ndef centerize_vary_length_series(x):\n    prefix_zeros = np.argmax(~np.isnan(x).all(axis=-1), axis=1)\n    suffix_zeros = np.argmax(~np.isnan(x[:, ::-1]).all(axis=-1), axis=1)\n    offset = (prefix_zeros + suffix_zeros) // 2 - prefix_zeros\n    rows, column_indices = np.ogrid[:x.shape[0], :x.shape[1]]\n    offset[offset < 0] += x.shape[1]\n    column_indices = column_indices - offset[:, np.newaxis]\n    return x[rows, column_indices]\n\ndef data_dropout(arr, p):\n    B, T = arr.shape[0], arr.shape[1]\n    mask = np.full(B*T, False, dtype=np.bool)\n    ele_sel = np.random.choice(\n        B*T,\n        size=int(B*T*p),\n        replace=False\n    )\n    mask[ele_sel] = True\n    res = arr.copy()\n    res[mask.reshape(B, T)] = np.nan\n    return res\n\ndef name_with_datetime(prefix='default'):\n    now = datetime.now()\n    return prefix + '_' + now.strftime(\"%Y%m%d_%H%M%S\")\n\ndef init_dl_program(\n    device_name,\n    seed=None,\n    use_cudnn=True,\n    deterministic=False,\n    benchmark=False,\n    use_tf32=False,\n    max_threads=None\n):\n    import torch\n    if max_threads is not None:\n        
torch.set_num_threads(max_threads)  # intraop\n        if torch.get_num_interop_threads() != max_threads:\n            torch.set_num_interop_threads(max_threads)  # interop\n        try:\n            import mkl\n        except:\n            pass\n        else:\n            mkl.set_num_threads(max_threads)\n        \n    if seed is not None:\n        random.seed(seed)\n        np.random.seed(seed)\n        torch.manual_seed(seed)\n        \n    if isinstance(device_name, (str, int)):\n        device_name = [device_name]\n    \n    devices = []\n    for t in reversed(device_name):\n        t_device = torch.device(t)\n        devices.append(t_device)\n        if t_device.type == 'cuda':\n            assert torch.cuda.is_available()\n            torch.cuda.set_device(t_device)\n            if seed is not None:\n                torch.cuda.manual_seed(seed)\n                torch.cuda.manual_seed_all(seed)\n\n    devices.reverse()\n    torch.backends.cudnn.enabled = use_cudnn\n    torch.backends.cudnn.deterministic = deterministic\n    torch.backends.cudnn.benchmark = benchmark\n    \n    if hasattr(torch.backends.cudnn, 'allow_tf32'):\n        torch.backends.cudnn.allow_tf32 = use_tf32\n        torch.backends.cuda.matmul.allow_tf32 = use_tf32\n        \n    return devices if len(devices) > 1 else devices[0]\n\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/AT_solver.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport os\nimport time\nimport os\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport numpy as np\nfrom other_anomaly_baselines.metrics.metrics import *\nfrom tadpak import evaluate\n\nfrom torch.utils.data import TensorDataset, DataLoader\nimport torch\n\n\nfrom other_anomaly_baselines.models.AnomalyTransformer import AnomalyTransformer\n\n\n# def to_var(x, volatile=False):\n#     if torch.cuda.is_available():\n#         x = x.cuda()\n#     return Variable(x, volatile=volatile)\n\n\n\nclass UniLoader_train(object):\n    def __init__(self, data_set, win_size, step, mode=\"train\"):\n        self.mode = mode\n        self.step = step\n        self.win_size = win_size\n\n        self.train = data_set\n\n\n    def __len__(self):\n        \"\"\"\n        Number of images in the object dataset.\n        \"\"\"\n\n        return (self.train.shape[0] - self.win_size) // self.step + 1\n\n\n    def __getitem__(self, index):\n        index = index * self.step\n\n        return np.float32(self.train[index:index + self.win_size])\n\n\nclass UniLoader_test(object):\n    def __init__(self, data_set, label_set, win_size, step, mode=\"train\"):\n        self.mode = mode\n        self.step = step\n        self.win_size = win_size\n\n        self.train = data_set\n        self.train_labels = label_set\n\n\n    def __len__(self):\n        \"\"\"\n        Number of images in the object dataset.\n        \"\"\"\n\n        return (self.train.shape[0] - self.win_size) // self.step + 1\n\n\n    def __getitem__(self, index):\n        index = index * self.step\n\n        return np.float32(self.train[index:index + self.win_size]), np.float32(self.train_labels[0:self.win_size])\n\n\ndef split_N_pad(series,window_size):\n    assert len(series.shape)==2\n    ret=[]\n    l=series.shape[0]\n    for i in range(l//window_size):\n        
ret.append(series[i*window_size:(i+1)*window_size,:])\n    left = l-l//window_size*window_size\n    '''TODO:pad'''\n    if left!=0:\n        p = np.zeros([window_size,series.shape[1]])\n        p[:left,:]=series[-left:,:]\n        ret.append(p)\n    return ret\n\n\ndef mkdir(directory):\n    if not os.path.exists(directory):\n        os.makedirs(directory)\n\n\ndef my_kl_loss(p, q):\n    res = p * (torch.log(p + 0.0001) - torch.log(q + 0.0001))\n    return torch.mean(torch.sum(res, dim=-1), dim=1)\n\n\ndef adjust_learning_rate(optimizer, epoch, lr_):\n    lr_adjust = {epoch: lr_ * (0.5 ** ((epoch - 1) // 1))}\n    if epoch in lr_adjust.keys():\n        lr = lr_adjust[epoch]\n        for param_group in optimizer.param_groups:\n            param_group['lr'] = lr\n        print('Updating learning rate to {}'.format(lr))\n\n\nclass EarlyStopping:\n    def __init__(self, patience=7, verbose=False, dataset_name='', delta=0):\n        self.patience = patience\n        self.verbose = verbose\n        self.counter = 0\n        self.best_score = None\n        self.best_score2 = None\n        self.early_stop = False\n        self.val_loss_min = np.Inf\n        self.val_loss2_min = np.Inf\n        self.delta = delta\n        self.dataset = dataset_name\n\n    def __call__(self, val_loss, val_loss2, model, path):\n        score = -val_loss\n        score2 = -val_loss2\n        if self.best_score is None:\n            self.best_score = score\n            self.best_score2 = score2\n            self.save_checkpoint(val_loss, val_loss2, model, path)\n        elif score < self.best_score + self.delta or score2 < self.best_score2 + self.delta:\n            self.counter += 1\n            print(f'EarlyStopping counter: {self.counter} out of {self.patience}')\n            if self.counter >= self.patience:\n                self.early_stop = True\n        else:\n            self.best_score = score\n            self.best_score2 = score2\n            self.save_checkpoint(val_loss, 
val_loss2, model, path)\n            self.counter = 0\n\n    def save_checkpoint(self, val_loss, val_loss2, model, path):\n        if self.verbose:\n            print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}).  Saving model ...')\n        torch.save(model.state_dict(), os.path.join(path, str(self.dataset) + '_checkpoint.pth'))\n        self.val_loss_min = val_loss\n        self.val_loss2_min = val_loss2\n\n\nclass Solver(object):\n    DEFAULTS = {}\n\n    def __init__(self, config, train_set, train_loader, val_set, val_loader, test_set, test_loader, dev_cuda):\n\n        self.__dict__.update(Solver.DEFAULTS, **config)\n\n        self.train_loader = train_loader\n        self.vali_loader = val_loader\n        self.test_loader = test_loader\n        self.device = dev_cuda\n\n        self.build_model()\n        # self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n        self.criterion = nn.MSELoss()\n\n    def build_model(self):\n        self.model = AnomalyTransformer(win_size=self.win_size, enc_in=self.input_c, c_out=self.output_c, e_layers=3, cud_device=self.device)\n        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr)\n\n        # if torch.cuda.is_available():\n        self.model.to(self.device)\n\n    def vali(self, vali_loader):\n        self.model.eval()\n\n        loss_1 = []\n        loss_2 = []\n        for i, (input_data, _) in enumerate(vali_loader):\n            input = input_data.float().to(self.device)\n            output, series, prior, _ = self.model(input)\n            series_loss = 0.0\n            prior_loss = 0.0\n            for u in range(len(prior)):\n                series_loss += (torch.mean(my_kl_loss(series[u], (\n                        prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                               self.win_size)).detach())) + torch.mean(\n  
                  my_kl_loss(\n                        (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                self.win_size)).detach(),\n                        series[u])))\n                prior_loss += (torch.mean(\n                    my_kl_loss((prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                       self.win_size)),\n                               series[u].detach())) + torch.mean(\n                    my_kl_loss(series[u].detach(),\n                               (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                       self.win_size)))))\n            series_loss = series_loss / len(prior)\n            prior_loss = prior_loss / len(prior)\n\n            rec_loss = self.criterion(output, input)\n            loss_1.append((rec_loss - self.k * series_loss).item())\n            loss_2.append((rec_loss + self.k * prior_loss).item())\n\n        return np.average(loss_1), np.average(loss_2)\n\n    def train(self):\n\n        print(\"======================TRAIN MODE======================\")\n\n        time_now = time.time()\n        path = self.model_save_path\n        if not os.path.exists(path):\n            os.makedirs(path)\n        early_stopping = EarlyStopping(patience=3, verbose=True, dataset_name=self.dataset)\n        train_steps = len(self.train_loader)\n\n        for epoch in range(self.num_epochs):\n            iter_count = 0\n            loss1_list = []\n\n            epoch_time = time.time()\n            self.model.train()\n            for i, (input_data, labels) in enumerate(self.train_loader):\n\n                self.optimizer.zero_grad()\n                iter_count += 1\n       
         input = input_data.float().to(self.device)\n\n                output, series, prior, _ = self.model(input)\n\n                # calculate Association discrepancy\n                series_loss = 0.0\n                prior_loss = 0.0\n                for u in range(len(prior)):\n                    series_loss += (torch.mean(my_kl_loss(series[u], (\n                            prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                   self.win_size)).detach())) + torch.mean(\n                        my_kl_loss((prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                           self.win_size)).detach(),\n                                   series[u])))\n                    prior_loss += (torch.mean(my_kl_loss(\n                        (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                self.win_size)),\n                        series[u].detach())) + torch.mean(\n                        my_kl_loss(series[u].detach(), (\n                                prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                       self.win_size)))))\n                series_loss = series_loss / len(prior)\n                prior_loss = prior_loss / len(prior)\n\n                rec_loss = self.criterion(output, input)\n\n                loss1_list.append((rec_loss - self.k * series_loss).item())\n                loss1 = rec_loss - self.k * series_loss\n                loss2 = rec_loss + self.k * prior_loss\n\n                if (i + 1) % 100 == 0:\n                    speed = (time.time() - time_now) / 
iter_count\n                    left_time = speed * ((self.num_epochs - epoch) * train_steps - i)\n                    print('\\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(speed, left_time))\n                    iter_count = 0\n                    time_now = time.time()\n\n                # Minimax strategy\n                loss1.backward(retain_graph=True)\n                loss2.backward()\n                self.optimizer.step()\n\n            print(\"Epoch: {} cost time: {}\".format(epoch + 1, time.time() - epoch_time))\n            train_loss = np.average(loss1_list)\n\n            vali_loss1, vali_loss2 = self.vali(self.vali_loader)\n\n            print(\n                \"Epoch: {0}, Steps: {1} | Train Loss: {2:.7f} Vali Loss: {3:.7f} \".format(\n                    epoch + 1, train_steps, train_loss, vali_loss1))\n            early_stopping(vali_loss1, vali_loss2, self.model, path)\n            if early_stopping.early_stop:\n                print(\"Early stopping\")\n                break\n            adjust_learning_rate(self.optimizer, epoch + 1, self.lr)\n\n    def test(self, ucr_index=None):\n        self.model.load_state_dict(\n            torch.load(\n                os.path.join(str(self.model_save_path), str(self.dataset) + '_checkpoint.pth')))\n        self.model.eval()\n        temperature = 50\n\n        print(\"======================TEST MODE======================\")\n\n        criterion = nn.MSELoss(reduce=False)\n\n        # (1) stastic on the train set\n        attens_energy = []\n        for i, (input_data, labels) in enumerate(self.train_loader):\n            input = input_data.float().to(self.device)\n            output, series, prior, _ = self.model(input)\n            loss = torch.mean(criterion(input, output), dim=-1)\n            series_loss = 0.0\n            prior_loss = 0.0\n            for u in range(len(prior)):\n                if u == 0:\n                    series_loss = my_kl_loss(series[u], (\n                            
prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                   self.win_size)).detach()) * temperature\n                    prior_loss = my_kl_loss(\n                        (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                self.win_size)),\n                        series[u].detach()) * temperature\n                else:\n                    series_loss += my_kl_loss(series[u], (\n                            prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                   self.win_size)).detach()) * temperature\n                    prior_loss += my_kl_loss(\n                        (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                self.win_size)),\n                        series[u].detach()) * temperature\n\n            metric = torch.softmax((-series_loss - prior_loss), dim=-1)\n            cri = metric * loss\n            cri = cri.detach().cpu().numpy()\n            attens_energy.append(cri)\n\n        attens_energy = np.concatenate(attens_energy, axis=0).reshape(-1)\n        train_energy = np.array(attens_energy)\n\n        # (2) find the threshold\n        attens_energy = []\n        for i, (input_data, labels) in enumerate(self.test_loader):\n            input = input_data.float().to(self.device)\n            output, series, prior, _ = self.model(input)\n\n            loss = torch.mean(criterion(input, output), dim=-1)\n\n            series_loss = 0.0\n            prior_loss = 0.0\n            for u in range(len(prior)):\n                if u == 0:\n                    series_loss 
= my_kl_loss(series[u], (\n                            prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                   self.win_size)).detach()) * temperature\n                    prior_loss = my_kl_loss(\n                        (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                self.win_size)),\n                        series[u].detach()) * temperature\n                else:\n                    series_loss += my_kl_loss(series[u], (\n                            prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                   self.win_size)).detach()) * temperature\n                    prior_loss += my_kl_loss(\n                        (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                self.win_size)),\n                        series[u].detach()) * temperature\n            # Metric\n            metric = torch.softmax((-series_loss - prior_loss), dim=-1)\n            cri = metric * loss\n            cri = cri.detach().cpu().numpy()\n            attens_energy.append(cri)\n\n        attens_energy = np.concatenate(attens_energy, axis=0).reshape(-1)\n        test_energy = np.array(attens_energy)\n        combined_energy = np.concatenate([train_energy, test_energy], axis=0)\n        thresh = np.percentile(combined_energy, 100 - self.anormly_ratio)\n        print(\"Threshold :\", thresh)\n\n        # (3) evaluation on the test set\n        test_labels = []\n        attens_energy = []\n        for i, (input_data, labels) in enumerate(self.test_loader):\n            input = 
input_data.float().to(self.device)\n            output, series, prior, _ = self.model(input)\n\n            loss = torch.mean(criterion(input, output), dim=-1)\n\n            series_loss = 0.0\n            prior_loss = 0.0\n            for u in range(len(prior)):\n                if u == 0:\n                    series_loss = my_kl_loss(series[u], (\n                            prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                   self.win_size)).detach()) * temperature\n                    prior_loss = my_kl_loss(\n                        (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                self.win_size)),\n                        series[u].detach()) * temperature\n                else:\n                    series_loss += my_kl_loss(series[u], (\n                            prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                   self.win_size)).detach()) * temperature\n                    prior_loss += my_kl_loss(\n                        (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                self.win_size)),\n                        series[u].detach()) * temperature\n            metric = torch.softmax((-series_loss - prior_loss), dim=-1)\n\n            cri = metric * loss\n            cri = cri.detach().cpu().numpy()\n            attens_energy.append(cri)\n            test_labels.append(labels)\n\n        attens_energy = np.concatenate(attens_energy, axis=0).reshape(-1)\n        test_labels = np.concatenate(test_labels, axis=0).reshape(-1)\n        test_energy = 
np.array(attens_energy)\n        test_labels = np.array(test_labels)\n\n        pred = (test_energy > thresh).astype(int)\n\n        gt = test_labels.astype(int)\n\n        print(\"pred:   \", pred.shape)\n        print(\"gt:     \", gt.shape)\n\n        # results_f1_pa_k_10 = evaluate.evaluate(test_energy, test_labels, k=10)\n        # results_f1_pa_k_50 = evaluate.evaluate(test_energy, test_labels, k=50)\n        # results_f1_pa_k_90 = evaluate.evaluate(test_energy, test_labels, k=90)\n        #\n        # eval_res = {\n        #     'f1': None,\n        #     'precision': None,\n        #     'recall': None,\n        #     \"Affiliation precision\": None,\n        #     \"Affiliation recall\": None,\n        #     \"R_AUC_ROC\": None,\n        #     \"R_AUC_PR\": None,\n        #     \"VUS_ROC\": None,\n        #     \"VUS_PR\": None,\n        #     'f1_pa_10': results_f1_pa_k_10['best_f1_w_pa'],\n        #     'f1_pa_50': results_f1_pa_k_50['best_f1_w_pa'],\n        #     'f1_pa_90': results_f1_pa_k_90['best_f1_w_pa'],\n        # }\n\n        # results_f1_pa_k_10 = evaluate.evaluate(test_energy, test_labels, k=10)\n        # results_f1_pa_k_50 = evaluate.evaluate(test_energy, test_labels, k=50)\n        # results_f1_pa_k_90 = evaluate.evaluate(test_energy, test_labels, k=90)\n\n        eval_res = {\n            'f1': None,\n            'precision': None,\n            'recall': None,\n            \"Affiliation precision\": None,\n            \"Affiliation recall\": None,\n            \"R_AUC_ROC\": None,\n            \"R_AUC_PR\": None,\n            \"VUS_ROC\": None,\n            \"VUS_PR\": None,\n            'f1_pa_10': None,\n            'f1_pa_50': None,\n            'f1_pa_90': None,\n        }\n\n        if ucr_index == 79 or ucr_index == 108 or ucr_index == 187 or ucr_index == 203:\n            pass\n        else:\n\n            # # matrix = [self.index]\n            scores_simple = combine_all_evaluation_scores(pred, gt, test_energy)\n            for 
key, value in scores_simple.items():\n                # matrix.append(value)\n                if key == 'Affiliation precision':\n                    eval_res[\"Affiliation precision\"] = value\n                if key == 'Affiliation recall':\n                    eval_res[\"Affiliation recall\"] = value\n                if key == 'R_AUC_ROC':\n                    eval_res[\"R_AUC_ROC\"] = value\n                if key == 'R_AUC_PR':\n                    eval_res[\"R_AUC_PR\"] = value\n                if key == 'VUS_ROC':\n                    eval_res[\"VUS_ROC\"] = value\n                if key == 'VUS_PR':\n                    eval_res[\"VUS_PR\"] = value\n\n                print('{0:21} : {1:0.4f}'.format(key, value))\n\n        # detection adjustment: please see this issue for more information https://github.com/thuml/Anomaly-Transformer/issues/14\n        anomaly_state = False\n        for i in range(len(gt)):\n            if gt[i] == 1 and pred[i] == 1 and not anomaly_state:\n                anomaly_state = True\n                for j in range(i, 0, -1):\n                    if gt[j] == 0:\n                        break\n                    else:\n                        if pred[j] == 0:\n                            pred[j] = 1\n                for j in range(i, len(gt)):\n                    if gt[j] == 0:\n                        break\n                    else:\n                        if pred[j] == 0:\n                            pred[j] = 1\n            elif gt[i] == 0:\n                anomaly_state = False\n            if anomaly_state:\n                pred[i] = 1\n\n        pred = np.array(pred)\n        gt = np.array(gt)\n        print(\"pred: \", pred.shape)\n        print(\"gt:   \", gt.shape)\n\n        from sklearn.metrics import precision_recall_fscore_support\n        from sklearn.metrics import accuracy_score\n        accuracy = accuracy_score(gt, pred)\n        precision, recall, f_score, support = precision_recall_fscore_support(gt, pred,\n  
                                                                            average='binary')\n        print(\n            \"Accuracy : {:0.4f}, Precision : {:0.4f}, Recall : {:0.4f}, F-score : {:0.4f} \".format(\n                accuracy, precision,\n                recall, f_score))\n\n        eval_res['f1'] = f_score\n        eval_res['precision'] = precision\n        eval_res['recall'] = recall\n\n        return eval_res\n\n\n    def train_uni(self):\n\n        print(\"======================TRAIN MODE======================\")\n\n        time_now = time.time()\n        path = self.model_save_path\n        if not os.path.exists(path):\n            os.makedirs(path)\n        early_stopping = EarlyStopping(patience=3, verbose=True, dataset_name=self.dataset)\n        train_steps = len(self.train_loader)\n\n        for epoch in range(self.num_epochs):\n            iter_count = 0\n            loss1_list = []\n\n            epoch_time = time.time()\n            self.model.train()\n            for i, input_data in enumerate(self.train_loader):\n\n                self.optimizer.zero_grad()\n                iter_count += 1\n                # print(\"type(input_data) = \", type(input_data), len(input_data))\n                # # input_data = np.array(input_data)\n                print(\"type(input_data) = \", type(input_data), input_data.shape)\n                input = input_data.float().to(self.device)\n\n                output, series, prior, _ = self.model(input)\n\n                # calculate Association discrepancy\n                series_loss = 0.0\n                prior_loss = 0.0\n                for u in range(len(prior)):\n                    series_loss += (torch.mean(my_kl_loss(series[u], (\n                            prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                   self.win_size)).detach())) + torch.mean(\n                        
my_kl_loss((prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                           self.win_size)).detach(),\n                                   series[u])))\n                    prior_loss += (torch.mean(my_kl_loss(\n                        (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                self.win_size)),\n                        series[u].detach())) + torch.mean(\n                        my_kl_loss(series[u].detach(), (\n                                prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                       self.win_size)))))\n                series_loss = series_loss / len(prior)\n                prior_loss = prior_loss / len(prior)\n\n                rec_loss = self.criterion(output, input)\n\n                loss1_list.append((rec_loss - self.k * series_loss).item())\n                loss1 = rec_loss - self.k * series_loss\n                loss2 = rec_loss + self.k * prior_loss\n\n                if (i + 1) % 100 == 0:\n                    speed = (time.time() - time_now) / iter_count\n                    left_time = speed * ((self.num_epochs - epoch) * train_steps - i)\n                    print('\\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(speed, left_time))\n                    iter_count = 0\n                    time_now = time.time()\n\n                # Minimax strategy\n                loss1.backward(retain_graph=True)\n                loss2.backward()\n                self.optimizer.step()\n\n            print(\"Epoch: {} cost time: {}\".format(epoch + 1, time.time() - epoch_time))\n            train_loss = np.average(loss1_list)\n\n            vali_loss1, vali_loss2 = 
self.vali(self.vali_loader)\n\n            print(\n                \"Epoch: {0}, Steps: {1} | Train Loss: {2:.7f}\".format(\n                    epoch + 1, train_steps, train_loss))\n            early_stopping(vali_loss1, vali_loss2, self.model, path)\n            if early_stopping.early_stop:\n                print(\"Early stopping\")\n                break\n            adjust_learning_rate(self.optimizer, epoch + 1, self.lr)\n\n    def test_uni(self, all_train_data, all_test_data, all_test_labels, all_test_timestamps, delay, config):\n        # self.model.load_state_dict(\n        #     torch.load(\n        #         os.path.join(str(self.model_save_path), str(self.dataset) + '_checkpoint.pth')))\n        self.model.eval()\n        temperature = 50\n\n        print(\"======================TEST MODE======================\")\n\n        criterion = nn.MSELoss(reduce=False)\n\n        # (1) stastic on the train set\n        attens_energy = []\n        for k in all_train_data:\n            train_data = all_train_data[k]\n\n            train_data = np.array(train_data)\n\n            # train_data =\n            train_data = np.expand_dims(train_data, axis=-1)\n            train_dataset = UniLoader_train(train_data, config.win_size, 1)\n\n            train_loader = DataLoader(dataset=train_dataset,\n                                      batch_size=config.batch_size,\n                                      shuffle=True,\n                                      num_workers=2,\n                                      drop_last=True)\n\n            # train_dataset = TensorDataset(torch.from_numpy(train_data).to(torch.float))\n            # train_loader = DataLoader(train_dataset, batch_size=min(config.batch_size, len(train_dataset)),\n            #                           shuffle=True,\n            #                           drop_last=True)\n            for i, input_data in enumerate(train_loader):\n                # print(\"type(input) = \", type(input_data), 
input_data.shape)\n                input = input_data.float().to(self.device)\n                output, series, prior, _ = self.model(input)\n                loss = torch.mean(criterion(input, output), dim=-1)\n                series_loss = 0.0\n                prior_loss = 0.0\n                for u in range(len(prior)):\n                    if u == 0:\n                        series_loss = my_kl_loss(series[u], (\n                                prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                       self.win_size)).detach()) * temperature\n                        prior_loss = my_kl_loss(\n                            (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                    self.win_size)),\n                            series[u].detach()) * temperature\n                    else:\n                        series_loss += my_kl_loss(series[u], (\n                                prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                       self.win_size)).detach()) * temperature\n                        prior_loss += my_kl_loss(\n                            (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                    self.win_size)),\n                            series[u].detach()) * temperature\n\n                metric = torch.softmax((-series_loss - prior_loss), dim=-1)\n                cri = metric * loss\n                cri = cri.detach().cpu().numpy()\n                attens_energy.append(cri)\n\n        attens_energy = np.concatenate(attens_energy, axis=0).reshape(-1)\n       
 train_energy = np.array(attens_energy)\n\n        # (2) find the threshold\n        attens_energy = []\n        for k in all_train_data:\n            _test_labels = all_test_labels[k]\n            test_data = all_test_data[k]\n\n            test_data = np.array(test_data)\n\n            test_data = np.expand_dims(test_data, axis=-1)\n\n            test_dataset = UniLoader_test(test_data, _test_labels, config.win_size, 1)\n\n            test_loader = DataLoader(dataset=test_dataset,\n                                      batch_size=config.batch_size,\n                                      shuffle=True,\n                                      num_workers=2,\n                                      drop_last=True)\n\n            for i, (input_data, labels) in enumerate(test_loader):\n                input = input_data.float().to(self.device)\n                output, series, prior, _ = self.model(input)\n\n                loss = torch.mean(criterion(input, output), dim=-1)\n\n                series_loss = 0.0\n                prior_loss = 0.0\n                for u in range(len(prior)):\n                    if u == 0:\n                        series_loss = my_kl_loss(series[u], (\n                                prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                       self.win_size)).detach()) * temperature\n                        prior_loss = my_kl_loss(\n                            (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                    self.win_size)),\n                            series[u].detach()) * temperature\n                    else:\n                        series_loss += my_kl_loss(series[u], (\n                                prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n    
                                                                                                   self.win_size)).detach()) * temperature\n                        prior_loss += my_kl_loss(\n                            (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                    self.win_size)),\n                            series[u].detach()) * temperature\n                # Metric\n                metric = torch.softmax((-series_loss - prior_loss), dim=-1)\n                cri = metric * loss\n                cri = cri.detach().cpu().numpy()\n                attens_energy.append(cri)\n\n        attens_energy = np.concatenate(attens_energy, axis=0).reshape(-1)\n        test_energy = np.array(attens_energy)\n        combined_energy = np.concatenate([train_energy, test_energy], axis=0)\n        thresh = np.percentile(combined_energy, 100 - self.anormly_ratio)\n        print(\"Threshold :\", thresh)\n\n        # (3) evaluation on the test set\n        test_labels_list = []\n        attens_energy = []\n        for k in all_train_data:\n            _test_labels = all_test_labels[k]\n            # test_labels_list.append(_test_labels)\n\n            test_data = all_test_data[k]\n\n            test_data = np.array(test_data)\n\n            test_data = np.expand_dims(test_data, axis=-1)\n\n            test_dataset = UniLoader_test(test_data, _test_labels, config.win_size, 1)\n\n            test_loader = DataLoader(dataset=test_dataset,\n                                     batch_size=config.batch_size,\n                                     shuffle=True,\n                                     num_workers=2,\n                                     drop_last=True)\n\n            # test_dataset = TensorDataset(torch.from_numpy(test_data).to(torch.float), torch.from_numpy(_test_labels).float())\n            # test_loader = DataLoader(test_dataset, 
batch_size=min(config.batch_size, len(test_dataset)),\n            #                          shuffle=True,\n            #                          drop_last=True)\n\n            for i, (input_data, labels) in enumerate(test_loader):\n                input = input_data.float().to(self.device)\n                output, series, prior, _ = self.model(input)\n\n                loss = torch.mean(criterion(input, output), dim=-1)\n\n                series_loss = 0.0\n                prior_loss = 0.0\n                for u in range(len(prior)):\n                    if u == 0:\n                        series_loss = my_kl_loss(series[u], (\n                                prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                       self.win_size)).detach()) * temperature\n                        prior_loss = my_kl_loss(\n                            (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                    self.win_size)),\n                            series[u].detach()) * temperature\n                    else:\n                        series_loss += my_kl_loss(series[u], (\n                                prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                       self.win_size)).detach()) * temperature\n                        prior_loss += my_kl_loss(\n                            (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                    self.win_size)),\n                            series[u].detach()) * temperature\n                metric = torch.softmax((-series_loss - prior_loss), 
dim=-1)\n\n                cri = metric * loss\n                cri = cri.detach().cpu().numpy()\n                attens_energy.append(cri)\n                test_labels_list.append(labels)\n\n\n        attens_energy = np.concatenate(attens_energy, axis=0).reshape(-1)\n        test_labels = np.concatenate(test_labels_list, axis=0).reshape(-1)\n        test_energy = np.array(attens_energy)\n        test_labels = np.array(test_labels)\n\n        # attens_energy = np.concatenate(attens_energy, axis=0).reshape(-1)\n        # test_labels = np.concatenate(test_labels_list, axis=0).reshape(-1)\n        # test_energy = np.array(attens_energy)\n        # test_labels = np.array(test_labels)\n\n        pred = (test_energy > thresh).astype(int)\n\n        gt = test_labels.astype(int)\n\n        print(\"pred:   \", pred.shape)\n        print(\"gt:     \", gt.shape)\n\n        # results_f1_pa_k_10 = evaluate.evaluate(test_energy, test_labels, k=10)\n        # results_f1_pa_k_50 = evaluate.evaluate(test_energy, test_labels, k=50)\n        # results_f1_pa_k_90 = evaluate.evaluate(test_energy, test_labels, k=90)\n\n        eval_res = {\n            'f1': None,\n            'precision': None,\n            'recall': None,\n            \"Affiliation precision\": None,\n            \"Affiliation recall\": None,\n            \"R_AUC_ROC\": None,\n            \"R_AUC_PR\": None,\n            \"VUS_ROC\": None,\n            \"VUS_PR\": None,\n            # 'f1_pa_10': results_f1_pa_k_10['best_f1_w_pa'],\n            # 'f1_pa_50': results_f1_pa_k_50['best_f1_w_pa'],\n            # 'f1_pa_90': results_f1_pa_k_90['best_f1_w_pa'],\n        }\n\n        # matrix = [self.index]\n        scores_simple = combine_all_evaluation_scores(pred, gt, test_energy)\n        for key, value in scores_simple.items():\n            # matrix.append(value)\n            if key == 'Affiliation precision':\n                eval_res[\"Affiliation precision\"] = value\n            if key == 'Affiliation recall':\n     
           eval_res[\"Affiliation recall\"] = value\n            if key == 'R_AUC_ROC':\n                eval_res[\"R_AUC_ROC\"] = value\n            if key == 'R_AUC_PR':\n                eval_res[\"R_AUC_PR\"] = value\n            if key == 'VUS_ROC':\n                eval_res[\"VUS_ROC\"] = value\n            if key == 'VUS_PR':\n                eval_res[\"VUS_PR\"] = value\n\n            print('{0:21} : {1:0.4f}'.format(key, value))\n\n        # detection adjustment: please see this issue for more information https://github.com/thuml/Anomaly-Transformer/issues/14\n        anomaly_state = False\n        for i in range(len(gt)):\n            if gt[i] == 1 and pred[i] == 1 and not anomaly_state:\n                anomaly_state = True\n                for j in range(i, 0, -1):\n                    if gt[j] == 0:\n                        break\n                    else:\n                        if pred[j] == 0:\n                            pred[j] = 1\n                for j in range(i, len(gt)):\n                    if gt[j] == 0:\n                        break\n                    else:\n                        if pred[j] == 0:\n                            pred[j] = 1\n            elif gt[i] == 0:\n                anomaly_state = False\n            if anomaly_state:\n                pred[i] = 1\n\n        pred = np.array(pred)\n        gt = np.array(gt)\n        print(\"pred: \", pred.shape)\n        print(\"gt:   \", gt.shape)\n\n        from sklearn.metrics import precision_recall_fscore_support\n        from sklearn.metrics import accuracy_score\n        accuracy = accuracy_score(gt, pred)\n        precision, recall, f_score, support = precision_recall_fscore_support(gt, pred,\n                                                                              average='binary')\n        print(\n            \"Accuracy : {:0.4f}, Precision : {:0.4f}, Recall : {:0.4f}, F-score : {:0.4f} \".format(\n                accuracy, precision,\n                recall, 
f_score))\n\n        eval_res['f1'] = f_score\n        eval_res['precision'] = precision\n        eval_res['recall'] = recall\n\n        return eval_res\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/ATmodelbatch.py",
    "content": "import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport pdb\nimport numpy as np\nfrom utils import data_slice, split_N_pad\nimport time\nfrom torch.utils.data import DataLoader, TensorDataset, SequentialSampler\n\nif torch.cuda.is_available():\n    torch.set_default_tensor_type('torch.cuda.DoubleTensor')\nelse:\n    torch.set_default_tensor_type('torch.DoubleTensor')\n\n\nclass AnomalyAttention(nn.Module):\n    def __init__(self, N, d_model):\n        super(AnomalyAttention, self).__init__()\n        self.d_model = d_model\n        self.N = N\n\n        self.Wq = nn.Linear(d_model, d_model, bias=False)\n        self.Wk = nn.Linear(d_model, d_model, bias=False)\n        self.Wv = nn.Linear(d_model, d_model, bias=False)\n        self.Ws = nn.Linear(d_model, 1, bias=False)\n        self.Q = self.K = self.V = self.sigma = torch.zeros((N, d_model))\n        self.P = torch.zeros((N, N))\n        self.S = torch.zeros((N, N))\n\n    def forward(self, x):\n        # x :[batch,N,d_model]\n        self.initialize(x)\n        self.S = self.series_association()\n        self.P = self.prior_association()\n        Z = self.reconstruction()\n        return Z\n\n    def initialize(self, x):\n        self.Q = self.Wq(x)\n        self.K = self.Wk(x)\n        self.V = self.Wv(x)\n        self.sigma = self.Ws(x)\n\n    @staticmethod\n    def gaussian_kernel(mean, sigma):\n        normalize = 1 / (math.sqrt(2 * torch.pi) * torch.abs(sigma))\n        return normalize * torch.exp(-0.5 * (mean / sigma).pow(2))\n\n    def prior_association(self):\n        # qwe = torch.from_numpy(\n        #     np.abs(np.indices((self.N, self.N))[0] - np.indices((self.N, self.N))[1])\n        # ).cuda\n        qwe = torch.from_numpy(\n            np.abs(np.indices((self.N, self.N))[0] - np.indices((self.N, self.N))[1])\n        )\n        if torch.cuda.is_available():\n            qwe = qwe.cuda()\n        # 原 gaussian: [batch,N,N]\n        # 
因为是高斯所以这里行列求和都一样\n        gaussian = self.gaussian_kernel(qwe.double(), self.sigma)\n        gaussian /= gaussian.sum(dim=-1).view(-1, self.N, 1)\n        return gaussian\n\n    def series_association(self):\n        # 原 [N,N]\n        # return F.softmax(self.Q @ self.K.T / math.sqrt(self.d_model), dim=0)\n        # 现 [batch,N,N],是列方向的softmax？,应该是不对的，得改成行方向的softmax，根据下游的reconstruction来看\n        return F.softmax(torch.matmul(self.Q, self.K.transpose(1, 2)) / math.sqrt(self.d_model), dim=2)\n\n    def reconstruction(self):\n        return torch.matmul(self.S, self.V)\n\n\nclass AnomalyTransformerBlock(nn.Module):\n    def __init__(self, N, d_model):\n        super().__init__()\n        self.N, self.d_model = N, d_model\n\n        self.attention = AnomalyAttention(self.N, self.d_model)\n        self.ln1 = nn.LayerNorm(self.d_model)\n        self.ff = nn.Sequential(nn.Linear(self.d_model, self.d_model), nn.ReLU())\n        self.ln2 = nn.LayerNorm(self.d_model)\n\n    def forward(self, x):\n        # x: [batch,N,d_model]\n        x_identity = x\n        x = self.attention(x)\n        z = self.ln1(x + x_identity)\n        z_identity = z\n        z = self.ff(z)\n        z = self.ln2(z + z_identity)\n\n        # z: [batch,N,d_model]\n        return z\n\n\nclass AnomalyTransformer(nn.Module):\n    def __init__(self, batch_size, N, in_channel, d_model, layers, lambda_):\n        super().__init__()\n        self.batch_size = batch_size\n        self.in_channel = in_channel\n        self.N = N\n        self.d_model = d_model\n\n        self.input2hidden = nn.Linear(self.in_channel, self.d_model)\n        self.hidden2output = nn.Linear(self.d_model, self.in_channel)\n        self.blocks = nn.ModuleList(\n            [AnomalyTransformerBlock(self.N, self.d_model) for _ in range(layers)]\n        )\n        self.output = None\n        self.lambda_ = lambda_\n\n        self.P_layers = []\n        self.S_layers = []\n\n    def to_string(self):\n        return 
'in_channel:%d_N:%d_dmodel:%d_' % (self.in_channel, self.N, self.d_model)\n\n    def forward(self, x):\n\n        # x: [batch,N,in_channel]\n        self.P_layers = []\n        self.S_layers = []\n        x = self.input2hidden(x)\n        for idx, block in enumerate(self.blocks):\n            x = block(x)\n            # x: [batch,N,d_model]\n            self.P_layers.append(block.attention.P)\n            self.S_layers.append(block.attention.S)\n        self.output = self.hidden2output(x)\n        # output: [batch,N,in_channel]\n        return self.output\n\n    # def layer_association_discrepancy(self, Pl, Sl, x):\n    #     rowwise_kl = lambda row: (\n    #         F.kl_div(Pl[row, :], Sl[row, :]) + F.kl_div(Sl[row, :], Pl[row, :])\n    #     )\n    #     ad_vector = torch.concat(\n    #         [rowwise_kl(row).unsqueeze(0) for row in range(Pl.shape[0])]\n    #     )\n    #     return ad_vector\n    # ad_vector: [N]\n\n    # def rowwise_kl (self,Pl,Sl,idx,row):\n    #     return F.kl_div(Pl[idx,row, :], Sl[idx,row, :]) + F.kl_div(Sl[idx,row, :], Pl[idx,row, :])\n    # def layer_association_discrepancy(self, Pl, Sl, x):\n\n    #     wholetmp=[]\n    #     for idx in range(Pl.shape[0]):\n    #         rowtmp=[]\n    #         for row in range(Pl.shape[1]):\n    #             rowtmp.append(self.rowwise_kl(Pl,Sl,idx,row).unsqueeze(0))\n    #         wholetmp.append(torch.cat(rowtmp))\n\n    #     ad_vector = torch.cat(\n    #         wholetmp\n    #     ).reshape([-1,Pl.shape[1]])\n    #     #ad_vector: [batch,N]\n    #     return ad_vector\n\n    def rowwise_kl(self, row, Pl, Sl, eps=1e-4):\n        Pl_r = Pl[:, row, :]\n        Sl_r = Sl[:, row, :]\n        Pl_r = (Pl_r + eps) / torch.sum(Pl_r + eps, dim=-1, keepdims=True)\n        Sl_r = (Sl_r + eps) / torch.sum(Sl_r + eps, dim=-1, keepdims=True)\n        '''TODO:改这个函数'''\n        ret = torch.sum(\n            F.kl_div(torch.log(Pl_r), Sl_r, reduction='none') + F.kl_div(torch.log(Sl_r), Pl_r, reduction='none'), 
dim=1\n        )\n        return ret\n\n    def layer_association_discrepancy(self, Pl, Sl, x):\n        ad_vector = torch.concat(\n            [self.rowwise_kl(row, Pl, Sl).unsqueeze(1) for row in range(Pl.shape[1])], dim=1\n        )\n        return ad_vector\n\n    def association_discrepancy(self, P_list, S_list, x):\n\n        ret = (1 / len(P_list)) * sum(\n            [\n                self.layer_association_discrepancy(P, S, x)\n                for P, S in zip(P_list, S_list)\n            ]\n        )\n        # ret: [batch,N]\n        return ret\n\n    def loss_function(self, x_hat, P_list, S_list, lambda_, x):\n        # P_list: [layers,batch,N,N]\n        # S_list: [layers,batch,N,N]\n        frob_norm = torch.linalg.matrix_norm(x_hat - x, ord=\"fro\")\n        ret = frob_norm - (\n                lambda_\n                * torch.linalg.norm(self.association_discrepancy(P_list, S_list, x), dim=1, ord=1)\n        )\n        return ret.mean()\n\n    def min_loss(self, x):\n\n        P_list = self.P_layers\n        S_list = [S.detach() for S in self.S_layers]\n        # S_list = self.S_layers\n        lambda_ = -self.lambda_\n        return self.loss_function(self.output, P_list, S_list, lambda_, x)\n\n    def max_loss(self, x):\n        P_list = [P.detach() for P in self.P_layers]\n        # P_list = self.P_layers\n        S_list = self.S_layers\n        lambda_ = self.lambda_\n        return self.loss_function(self.output, P_list, S_list, lambda_, x)\n\n    def anomaly_score_whole(self, x):\n        # x:[length,dim]\n        x = np.array(split_N_pad(x.reshape([-1, 1]), self.N))\n        '''TODO:测试data_slice'''\n        data = torch.from_numpy(x)\n        if torch.cuda.is_available():\n            data = data.cuda()\n        dataset = TensorDataset(data)\n        dataloader = DataLoader(dataset, batch_size=min(self.batch_size, len(dataset)), shuffle=False, drop_last=False)\n        scores = []\n        for step, batch in enumerate(dataloader):\n           
 batch = batch[0]\n            score = self.anomaly_score(batch)\n            scores.append(score)\n        return torch.cat(scores).flatten()\n\n    def anomaly_score(self, x):\n        # 原 x:[N,in_channel]\n        output = self.forward(x)\n        tmp = -self.association_discrepancy(self.P_layers, self.S_layers, x)\n        ad = F.softmax(\n            tmp, dim=0\n        )\n        assert ad.shape[1] == self.N\n\n        # norm = torch.tensor(\n        #     [\n        #         torch.linalg.norm(x[i, :] - self.output[i, :], ord=2)\n        #         for i in range(self.N)\n        #     ]\n        # )\n        norm = []\n        for idx in range(x.shape[0]):\n            tmp = torch.tensor(\n                [\n                    torch.linalg.norm(x[idx, i, :] - self.output[idx, i, :], ord=2)\n                    for i in range(self.N)\n                ]\n            )\n            norm.append(tmp)\n        norm = torch.cat(norm).reshape([-1, self.N])\n        assert norm.shape[1] == self.N\n        score = torch.mul(ad, norm)\n        return score\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/README.md",
    "content": "## README_Anomaly_Detection\n\n### Usage\n\n|  ID  |                            Method                            | Year |   Press   |                         Source Code                          |\n| :--: | :----------------------------------------------------------: | :--: | :-------: | :----------------------------------------------------------: |\n|  1   |  [SPOT](https://dl.acm.org/doi/abs/10.1145/3097983.3098144)  | 2017 |    KDD    |     [github_link](https://github.com/Amossys-team/SPOT)      |\n|  2   | [DSPOT](https://dl.acm.org/doi/abs/10.1145/3097983.3098144)  | 2017 |    KDD    |     [github_link](https://github.com/Amossys-team/SPOT)      |\n|  3   | [LSTM-VAE](https://ieeexplore.ieee.org/abstract/document/8279425) | 2018 | IEEE RA.L | [github_link](https://github.com/SchindlerLiang/VAE-for-Anomaly-Detection) |\n|  4   | [DONUT](https://dl.acm.org/doi/abs/10.1145/3178876.3185996)  | 2018 |    WWW    |     [github_link](https://github.com/NetManAIOps/donut)      |\n|  5   |  [SR*](https://dl.acm.org/doi/abs/10.1145/3292500.3330680)   | 2019 |    KDD    |                              -                               |\n|  6   |            [AT](https://arxiv.org/abs/2110.02642)            | 2022 |   ICLR    | [github_link](https://github.com/spencerbraun/anomaly_transformer_pytorch) |\n|  7   | [TS2Vec](https://www.aaai.org/AAAI22Papers/AAAI-8809.YueZ.pdf) | 2022 |   AAAI    |      [github_link](https://github.com/yuezhihan/ts2vec)      |\n\n\n1. To train and evaluate SPOT/DSPOT on a dataset, set the dataset_name `dataset='yahoo' or 'kpi'`, and then run the following command:\n\n   ```python\n   python train_spot.py\n   python train_dspot.py\n   ```\n\n2. 
To train and evaluate LSTM-VAE on a dataset, run the following command:\n\n   ```python\n   python train_lstm_vae.py <dataset_name> <run_name> --loader <loader> --gpu <gpu_device_id> --seed 42 --eval\n   ```\n\n    `dataset_name`: The dataset name.\n\n    `run_name`: The folder name used to save model, output and evaluation metrics. This can be set to any word.\n\n    `loader`: The data loader used to load the experimental data.\n\n    `gpu_device_id`: The GPU device's ID. This can be  `0,1,2...`\n\n3. To train and evaluate DONUT on a dataset, run the following command:\n\n   ```python\n   python train_donut.py <dataset_name> <run_name> --loader <loader> --gpu <gpu_device_id> --seed 42 --eval\n   ```\n\n4. The anomaly detection results of the SR are collected from the original [SR](https://dl.acm.org/doi/abs/10.1145/3292500.3330680) article.\n\n5. To train and evaluate AT on a dataset,  set hyper_parameters in the file  `trainATbatch.py` , and then run the following command:\n\n   ```python\n   python trainATbatch.py\n   ```\n\n6. To train and evaluate TS2Vec on a dataset, run the following command:\n\n   ```python\n   python train_ts2vec.py <dataset_name> <run_name> --loader <loader> --repr-dims 320 --gpu <gpu_device_id> --seed 42 --eval\n   ```\n\n7. To train and evaluate TimesNet on a dataset, run the following command:\n\n   ```python\n   python train_timesnet.py <dataset_name> <run_name> ...\n   ```\n\n8. To train and evaluate GPT4TS on a dataset, run the following command:\n\n   ```python\n   python train_gpt4ts.py <dataset_name> <run_name> ...\n   ```\n   \n9. To train and evaluate DCdetector on a dataset, run the following command:\n\n   ```python\n   python train_dcdetector.py <dataset_name> <run_name> ...\n   ```"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/dataset_read_test.py",
    "content": "import datautils\nimport numpy as np\nfrom sklearn.metrics import f1_score, precision_score, recall_score\n\n\ndef get_range_proba(predict, label, delay=7):\n    splits = np.where(label[1:] != label[:-1])[0] + 1\n    is_anomaly = label[0] == 1\n    new_predict = np.array(predict)\n    pos = 0\n\n    for sp in splits:\n        if is_anomaly:\n            if 1 in predict[pos:min(pos + delay + 1, sp)]:\n                new_predict[pos: sp] = 1\n            else:\n                new_predict[pos: sp] = 0\n        is_anomaly = not is_anomaly\n        pos = sp\n    sp = len(label)\n\n    if is_anomaly:  # anomaly in the end\n        if 1 in predict[pos: min(pos + delay + 1, sp)]:\n            new_predict[pos: sp] = 1\n        else:\n            new_predict[pos: sp] = 0\n\n    return new_predict\n\n\n# set missing = 0\ndef reconstruct_label(timestamp, label):\n    timestamp = np.asarray(timestamp, np.int64)\n    index = np.argsort(timestamp)\n\n    timestamp_sorted = np.asarray(timestamp[index])\n    interval = np.min(np.diff(timestamp_sorted))\n\n    label = np.asarray(label, np.int64)\n    label = np.asarray(label[index])\n\n    idx = (timestamp_sorted - timestamp_sorted[0]) // interval\n\n    new_label = np.zeros(shape=((timestamp_sorted[-1] - timestamp_sorted[0]) // interval + 1,), dtype=np.int)\n    new_label[idx] = label\n\n    return new_label\n\n\ndef eval_ad_result(test_pred_list, test_labels_list, test_timestamps_list, delay):\n    labels = []\n    pred = []\n    for test_pred, test_labels, test_timestamps in zip(test_pred_list, test_labels_list, test_timestamps_list):\n        assert test_pred.shape == test_labels.shape == test_timestamps.shape\n        test_labels = reconstruct_label(test_timestamps, test_labels)\n        test_pred = reconstruct_label(test_timestamps, test_pred)\n        test_pred = get_range_proba(test_pred, test_labels, delay)\n        labels.append(test_labels)\n        pred.append(test_pred)\n    labels = 
np.concatenate(labels)\n    pred = np.concatenate(pred)\n    return {\n        'f1': f1_score(labels, pred),\n        'precision': precision_score(labels, pred),\n        'recall': recall_score(labels, pred)\n    }\n\n\ndataset = 'kpi' # yahoo, kpi\nprint('Loading kpi data... ', end='')\nall_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay = datautils.load_anomaly(dataset)\n\nprint(\"type = \", type(all_train_data), type(all_train_labels), type(all_train_timestamps), type(all_test_data))\nprint(\"delay = \", delay)\ni = 1\nfor k in all_test_data:\n    print(\"i = \", i, \", k = \", k)\n    print(\"all_train_data.shape = \", all_train_data[k].shape)\n    print(\"all_train_labels.shape = \", all_train_labels[k].shape)\n    print(\"all_train_timestamps.shape = \", all_train_timestamps[k].shape)\n    print(\"all_test_data.shape = \", all_test_data[k].shape)\n    print(\"all_test_labels.shape = \", all_test_labels[k].shape)\n    print(\"all_test_timestamps.shape = \", all_test_timestamps[k].shape)\n    print(\"all_train_labels[k][:10] = \", all_train_labels[k][:10])\n    print(\"all_test_timestamps[k][:10] = \", all_test_timestamps[k][:10])\n    i = i + 1\n    break\n\n\n# dataset = 'yahoo' # yahoo, kpi\n# print('Loading yahoo data... 
', end='')\n# all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay = datautils.load_anomaly(dataset)\n#\n# print(\"type = \", type(all_train_data), type(all_train_labels), type(all_train_timestamps), type(all_test_data))\n# print(\"delay = \", delay)\n# i = 1\n# for k in all_test_data:\n#     print(\"i = \", i, \", k = \", k)\n#     print(\"all_train_data.shape = \", all_train_data[k].shape)\n#     print(\"all_train_labels.shape = \", all_train_labels[k].shape)\n#     print(\"all_train_timestamps.shape = \", all_train_timestamps[k].shape)\n#     print(\"all_test_data.shape = \", all_test_data[k].shape)\n#     print(\"all_test_labels.shape = \", all_test_labels[k].shape)\n#     print(\"all_test_timestamps.shape = \", all_test_timestamps[k].shape)\n#     i = i + 1"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/datautils.py",
    "content": "import os\nimport numpy as np\nimport pandas as pd\nimport math\nimport random\nfrom datetime import datetime\nimport pickle\nfrom utils import pkl_load, pad_nan_to_target\nfrom scipy.io.arff import loadarff\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\n\ndef load_UCR(dataset):\n    train_file = os.path.join('datasets/UCR', dataset, dataset + \"_TRAIN.tsv\")\n    test_file = os.path.join('datasets/UCR', dataset, dataset + \"_TEST.tsv\")\n    train_df = pd.read_csv(train_file, sep='\\t', header=None)\n    test_df = pd.read_csv(test_file, sep='\\t', header=None)\n    train_array = np.array(train_df)\n    test_array = np.array(test_df)\n\n    # Move the labels to {0, ..., L-1}\n    labels = np.unique(train_array[:, 0])\n    transform = {}\n    for i, l in enumerate(labels):\n        transform[l] = i\n\n    train = train_array[:, 1:].astype(np.float64)\n    train_labels = np.vectorize(transform.get)(train_array[:, 0])\n    test = test_array[:, 1:].astype(np.float64)\n    test_labels = np.vectorize(transform.get)(test_array[:, 0])\n\n    # Normalization for non-normalized datasets\n    # To keep the amplitude information, we do not normalize values over\n    # individual time series, but on the whole dataset\n    if dataset not in [\n        'AllGestureWiimoteX',\n        'AllGestureWiimoteY',\n        'AllGestureWiimoteZ',\n        'BME',\n        'Chinatown',\n        'Crop',\n        'EOGHorizontalSignal',\n        'EOGVerticalSignal',\n        'Fungi',\n        'GestureMidAirD1',\n        'GestureMidAirD2',\n        'GestureMidAirD3',\n        'GesturePebbleZ1',\n        'GesturePebbleZ2',\n        'GunPointAgeSpan',\n        'GunPointMaleVersusFemale',\n        'GunPointOldVersusYoung',\n        'HouseTwenty',\n        'InsectEPGRegularTrain',\n        'InsectEPGSmallTrain',\n        'MelbournePedestrian',\n        'PickupGestureWiimoteZ',\n        'PigAirwayPressure',\n        'PigArtPressure',\n        'PigCVP',\n        
'PLAID',\n        'PowerCons',\n        'Rock',\n        'SemgHandGenderCh2',\n        'SemgHandMovementCh2',\n        'SemgHandSubjectCh2',\n        'ShakeGestureWiimoteZ',\n        'SmoothSubspace',\n        'UMD'\n    ]:\n        return train[..., np.newaxis], train_labels, test[..., np.newaxis], test_labels\n    \n    mean = np.nanmean(train)\n    std = np.nanstd(train)\n    train = (train - mean) / std\n    test = (test - mean) / std\n    return train[..., np.newaxis], train_labels, test[..., np.newaxis], test_labels\n\ndef load_anomaly(name):\n    res = pkl_load(f'datasets/{name}.pkl')\n    return res['all_train_data'], res['all_train_labels'], res['all_train_timestamps'], \\\n           res['all_test_data'],  res['all_test_labels'],  res['all_test_timestamps'], \\\n           res['delay']\n\ndef gen_ano_train_data(all_train_data):\n    ''' Get the anomaly train data.\n    Args:\n        all_train_data(dict): all_train_data[k] (numpy.ndarray) with the shape (n_timestamps).\n    Returns:\n        pretrain_data (numpy.ndarray): padding with 'nan', the shape is (n_instance, n_timestamps, n_features).\n    '''\n    maxl = np.max([ len(all_train_data[k]) for k in all_train_data ])\n    pretrain_data = []\n    for k in all_train_data:\n        train_data = pad_nan_to_target(all_train_data[k], maxl, axis=0)\n        pretrain_data.append(train_data)\n    pretrain_data = np.expand_dims(np.stack(pretrain_data), 2)\n    return pretrain_data"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/dcdetector_solver.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport os\nimport time\n# from utils.utils import *\nfrom other_anomaly_baselines.models.DCdetector import DCdetector\nfrom other_anomaly_baselines.datasets.data_loader import get_loader_segment\nfrom einops import rearrange\nfrom other_anomaly_baselines.metrics.metrics import *\nimport warnings\nfrom tadpak import evaluate\nfrom torch.utils.data import TensorDataset, DataLoader\n\nwarnings.filterwarnings('ignore')\n\n\nclass UniLoader_train(object):\n    def __init__(self, data_set, win_size, step, mode=\"train\"):\n        self.mode = mode\n        self.step = step\n        self.win_size = win_size\n\n        self.train = data_set\n\n\n    def __len__(self):\n        \"\"\"\n        Number of images in the object dataset.\n        \"\"\"\n\n        return (self.train.shape[0] - self.win_size) // self.step + 1\n\n\n    def __getitem__(self, index):\n        index = index * self.step\n\n        return np.float32(self.train[index:index + self.win_size])\n\n\nclass UniLoader_test(object):\n    def __init__(self, data_set, label_set, win_size, step, mode=\"train\"):\n        self.mode = mode\n        self.step = step\n        self.win_size = win_size\n\n        self.train = data_set\n        self.train_labels = label_set\n\n\n    def __len__(self):\n        \"\"\"\n        Number of images in the object dataset.\n        \"\"\"\n\n        return (self.train.shape[0] - self.win_size) // self.step + 1\n\n\n    def __getitem__(self, index):\n        index = index * self.step\n\n        return np.float32(self.train[index:index + self.win_size]), np.float32(self.train_labels[0:self.win_size])\n\n\n\n\ndef my_kl_loss(p, q):\n    res = p * (torch.log(p + 0.0001) - torch.log(q + 0.0001))\n    return torch.mean(torch.sum(res, dim=-1), dim=1)\n\n\ndef adjust_learning_rate(optimizer, epoch, lr_):\n    lr_adjust = {epoch: lr_ * (0.5 ** ((epoch - 1) // 1))}\n    if epoch in 
lr_adjust.keys():\n        lr = lr_adjust[epoch]\n        for param_group in optimizer.param_groups:\n            param_group['lr'] = lr\n\n\nclass EarlyStopping:\n    def __init__(self, patience=7, verbose=False, dataset_name='', delta=0, index=0):\n        self.patience = patience\n        self.verbose = verbose\n        self.counter = 0\n        self.best_score = None\n        self.best_score2 = None\n        self.early_stop = False\n        self.val_loss_min = np.Inf\n        self.val_loss2_min = np.Inf\n        self.delta = delta\n        self.dataset = dataset_name\n        self.index = index\n\n    def __call__(self, val_loss, val_loss2, model, path):\n        score = -val_loss\n        score2 = -val_loss2\n        if self.best_score is None:\n            self.best_score = score\n            self.best_score2 = score2\n            self.save_checkpoint(val_loss, val_loss2, model, path)\n        elif score < self.best_score + self.delta or score2 < self.best_score2 + self.delta:\n            self.counter += 1\n            if self.counter >= self.patience:\n                self.early_stop = True\n        else:\n            self.best_score = score\n            self.best_score2 = score2\n            self.save_checkpoint(val_loss, val_loss2, model, path)\n            self.counter = 0\n\n    def save_checkpoint(self, val_loss, val_loss2, model, path):\n        print(\"os.path.join(path, str(self.dataset) + '_checkpoint.pth') = \", os.path.join(path, str(self.dataset) + '_checkpoint.pth'))\n        torch.save(model.state_dict(), os.path.join(path, str(self.dataset) + str(self.index) +'_checkpoint.pth'))\n        self.val_loss_min = val_loss\n        self.val_loss2_min = val_loss2\n\n\nclass Solver(object):\n    DEFAULTS = {}\n\n    def __init__(self, config, multi=True):\n\n        self.__dict__.update(Solver.DEFAULTS, **config)\n\n        if multi:\n            self.train_loader, _ = get_loader_segment(self.index, self.data_path + self.dataset, 
batch_size=self.batch_size,\n                                                   win_size=self.win_size, mode='train', dataset=self.dataset, )\n            self.vali_loader, _ = get_loader_segment(self.index, self.data_path + self.dataset, batch_size=self.batch_size,\n                                                  win_size=self.win_size, mode='val', dataset=self.dataset)\n            self.test_loader, _ = get_loader_segment(self.index, self.data_path + self.dataset, batch_size=self.batch_size,\n                                                  win_size=self.win_size, mode='test', dataset=self.dataset)\n            self.thre_loader, _ = get_loader_segment(self.index, self.data_path + self.dataset, batch_size=self.batch_size,\n                                                  win_size=self.win_size, mode='thre', dataset=self.dataset)\n        else:\n            self.train_loader, _ = None, None\n            self.vali_loader, _ = None, None\n            self.test_loader, _ = None, None\n            self.thre_loader, _ = None, None\n\n        self.build_model()\n\n        self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n        if self.loss_fuc == 'MAE':\n            self.criterion = nn.L1Loss()\n        elif self.loss_fuc == 'MSE':\n            self.criterion = nn.MSELoss()\n\n    def build_model(self):\n        self.model = DCdetector(win_size=self.win_size, enc_in=self.input_c, c_out=self.output_c, n_heads=self.n_heads,\n                                d_model=self.d_model, e_layers=self.e_layers, patch_size=self.patch_size,\n                                channel=self.input_c)\n\n        if torch.cuda.is_available():\n            self.model.cuda()\n\n        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr)\n\n    def vali(self, vali_loader):\n        self.model.eval()\n        loss_1 = []\n        loss_2 = []\n        for i, (input_data, _) in enumerate(vali_loader):\n            input = 
input_data.float().to(self.device)\n            series, prior = self.model(input)\n            series_loss = 0.0\n            prior_loss = 0.0\n            for u in range(len(prior)):\n                series_loss += (torch.mean(my_kl_loss(series[u], (\n                        prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                               self.win_size)).detach())) + torch.mean(\n                    my_kl_loss(\n                        (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                self.win_size)).detach(),\n                        series[u])))\n                prior_loss += (torch.mean(\n                    my_kl_loss((prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                       self.win_size)),\n                               series[u].detach())) + torch.mean(\n                    my_kl_loss(series[u].detach(),\n                               (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                       self.win_size)))))\n\n            series_loss = series_loss / len(prior)\n            prior_loss = prior_loss / len(prior)\n\n            loss_1.append((prior_loss - series_loss).item())\n\n        return np.average(loss_1), np.average(loss_2)\n\n    def train(self):\n\n        time_now = time.time()\n        path = self.model_save_path\n        if not os.path.exists(path):\n            os.makedirs(path)\n        early_stopping = EarlyStopping(patience=5, verbose=True, dataset_name=self.dataset, index=self.index)\n        train_steps = len(self.train_loader)\n\n        for epoch in 
range(self.num_epochs):\n            iter_count = 0\n\n            epoch_time = time.time()\n            self.model.train()\n            # for i, data in enumerate(self.train_loader):\n            #     print(data)\n            #     break\n\n            for i, (input_data, labels) in enumerate(self.train_loader):\n\n                self.optimizer.zero_grad()\n                iter_count += 1\n                input = input_data.float().to(self.device)\n\n\n                # print(\"input = \", type(input), input.shape)\n                series, prior = self.model(input)\n\n                series_loss = 0.0\n                prior_loss = 0.0\n\n                for u in range(len(prior)):\n                    series_loss += (torch.mean(my_kl_loss(series[u], (\n                            prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                   self.win_size)).detach())) + torch.mean(\n                        my_kl_loss((prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                           self.win_size)).detach(),\n                                   series[u])))\n                    prior_loss += (torch.mean(my_kl_loss(\n                        (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                self.win_size)),\n                        series[u].detach())) + torch.mean(\n                        my_kl_loss(series[u].detach(), (\n                                prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                       self.win_size)))))\n\n                series_loss = series_loss / 
len(prior)\n                prior_loss = prior_loss / len(prior)\n\n                loss = prior_loss - series_loss\n\n                if (i + 1) % 100 == 0:\n                    speed = (time.time() - time_now) / iter_count\n                    left_time = speed * ((self.num_epochs - epoch) * train_steps - i)\n                    print('\\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(speed, left_time))\n                    iter_count = 0\n                    time_now = time.time()\n\n                loss.backward()\n                self.optimizer.step()\n\n            vali_loss1, vali_loss2 = self.vali(self.vali_loader)\n\n            print(\n                \"Epoch: {0}, Cost time: {1:.3f}s \".format(\n                    epoch + 1, time.time() - epoch_time))\n            early_stopping(vali_loss1, vali_loss2, self.model, path)\n            if early_stopping.early_stop:\n                break\n            adjust_learning_rate(self.optimizer, epoch + 1, self.lr)\n\n    def test(self, ucr_index=None):\n        self.model.load_state_dict(\n            torch.load(\n                os.path.join(str(self.model_save_path), str(self.dataset) + str(self.index) + '_checkpoint.pth')))\n        self.model.eval()\n        temperature = 50\n\n        # (1) stastic on the train set\n        attens_energy = []\n        for i, (input_data, labels) in enumerate(self.train_loader):\n            input = input_data.float().to(self.device)\n            series, prior = self.model(input)\n            series_loss = 0.0\n            prior_loss = 0.0\n            for u in range(len(prior)):\n                if u == 0:\n                    series_loss = my_kl_loss(series[u], (\n                            prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                   self.win_size)).detach()) * temperature\n                    prior_loss = my_kl_loss(\n                     
   (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                self.win_size)),\n                        series[u].detach()) * temperature\n                else:\n                    series_loss += my_kl_loss(series[u], (\n                            prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                   self.win_size)).detach()) * temperature\n                    prior_loss += my_kl_loss(\n                        (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                self.win_size)),\n                        series[u].detach()) * temperature\n\n            metric = torch.softmax((-series_loss - prior_loss), dim=-1)\n            cri = metric.detach().cpu().numpy()\n            attens_energy.append(cri)\n\n        attens_energy = np.concatenate(attens_energy, axis=0).reshape(-1)\n        train_energy = np.array(attens_energy)\n\n        # (2) find the threshold\n        attens_energy = []\n        for i, (input_data, labels) in enumerate(self.thre_loader):\n            input = input_data.float().to(self.device)\n            series, prior = self.model(input)\n            series_loss = 0.0\n            prior_loss = 0.0\n            for u in range(len(prior)):\n                if u == 0:\n                    series_loss = my_kl_loss(series[u], (\n                            prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                   self.win_size)).detach()) * temperature\n                    prior_loss = my_kl_loss(\n                        (prior[u] / torch.unsqueeze(torch.sum(prior[u], 
dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                self.win_size)),\n                        series[u].detach()) * temperature\n                else:\n                    series_loss += my_kl_loss(series[u], (\n                            prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                   self.win_size)).detach()) * temperature\n                    prior_loss += my_kl_loss(\n                        (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                self.win_size)),\n                        series[u].detach()) * temperature\n\n            metric = torch.softmax((-series_loss - prior_loss), dim=-1)\n            cri = metric.detach().cpu().numpy()\n            attens_energy.append(cri)\n\n        attens_energy = np.concatenate(attens_energy, axis=0).reshape(-1)\n        test_energy = np.array(attens_energy)\n        combined_energy = np.concatenate([train_energy, test_energy], axis=0)\n        thresh = np.percentile(combined_energy, 100 - self.anormly_ratio)\n        print(\"Threshold :\", thresh)\n\n        # (3) evaluation on the test set\n        test_labels = []\n        attens_energy = []\n        for i, (input_data, labels) in enumerate(self.thre_loader):\n            input = input_data.float().to(self.device)\n            series, prior = self.model(input)\n            series_loss = 0.0\n            prior_loss = 0.0\n            for u in range(len(prior)):\n                if u == 0:\n                    series_loss = my_kl_loss(series[u], (\n                            prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                      
             self.win_size)).detach()) * temperature\n                    prior_loss = my_kl_loss(\n                        (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                self.win_size)),\n                        series[u].detach()) * temperature\n                else:\n                    series_loss += my_kl_loss(series[u], (\n                            prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                   self.win_size)).detach()) * temperature\n                    prior_loss += my_kl_loss(\n                        (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                self.win_size)),\n                        series[u].detach()) * temperature\n            metric = torch.softmax((-series_loss - prior_loss), dim=-1)\n            cri = metric.detach().cpu().numpy()\n            attens_energy.append(cri)\n            test_labels.append(labels)\n\n        attens_energy = np.concatenate(attens_energy, axis=0).reshape(-1)\n        test_labels = np.concatenate(test_labels, axis=0).reshape(-1)\n        test_energy = np.array(attens_energy)\n        test_labels = np.array(test_labels)\n\n        pred = (test_energy > thresh).astype(int)\n        gt = test_labels.astype(int)\n\n        # labels = np.asarray(labels_log, np.int64)[0]\n\n        # print(\"test_energy.shape = \", test_energy.shape, test_labels.shape)\n        # print(\"test_energy.shape = \", test_energy[:10])\n        # print(\"test_labels.shape = \", test_labels[:10])\n        index_list =  [38, 54, 71, 72, 79, 85, 88, 108, 146, 162, 179, 180, 187, 193, 196, 203, 212, 229, 232]\n        if ucr_index in index_list:\n            
eval_res = {\n                'f1': None,\n                'precision': None,\n                'recall': None,\n                \"Affiliation precision\": None,\n                \"Affiliation recall\": None,\n                \"R_AUC_ROC\": None,\n                \"R_AUC_PR\": None,\n                \"VUS_ROC\": None,\n                \"VUS_PR\": None,\n                'f1_pa_10': None,\n                'f1_pa_50': None,\n                'f1_pa_90': None,\n            }\n        else:\n\n            results_f1_pa_k_10 = evaluate.evaluate(test_energy, test_labels, k=10)\n            results_f1_pa_k_50 = evaluate.evaluate(test_energy, test_labels, k=50)\n            results_f1_pa_k_90 = evaluate.evaluate(test_energy, test_labels, k=90)\n\n            eval_res = {\n                'f1': None,\n                'precision': None,\n                'recall': None,\n                \"Affiliation precision\": None,\n                \"Affiliation recall\": None,\n                \"R_AUC_ROC\": None,\n                \"R_AUC_PR\": None,\n                \"VUS_ROC\": None,\n                \"VUS_PR\": None,\n                'f1_pa_10': results_f1_pa_k_10['best_f1_w_pa'],\n                'f1_pa_50': results_f1_pa_k_50['best_f1_w_pa'],\n                'f1_pa_90': results_f1_pa_k_90['best_f1_w_pa'],\n            }\n\n            matrix = [self.index]\n            scores_simple = combine_all_evaluation_scores(pred, gt, test_energy)\n            for key, value in scores_simple.items():\n                matrix.append(value)\n                if key == 'Affiliation precision':\n                    eval_res[\"Affiliation precision\"] = value\n                if key == 'Affiliation recall':\n                    eval_res[\"Affiliation recall\"] = value\n                if key == 'R_AUC_ROC':\n                    eval_res[\"R_AUC_ROC\"] = value\n                if key == 'R_AUC_PR':\n                    eval_res[\"R_AUC_PR\"] = value\n                if key == 'VUS_ROC':\n                
    eval_res[\"VUS_ROC\"] = value\n                if key == 'VUS_PR':\n                    eval_res[\"VUS_PR\"] = value\n\n                print('{0:21} : {1:0.4f}'.format(key, value))\n\n        anomaly_state = False\n        for i in range(len(gt)):\n            if gt[i] == 1 and pred[i] == 1 and not anomaly_state:\n                anomaly_state = True\n                for j in range(i, 0, -1):\n                    if gt[j] == 0:\n                        break\n                    else:\n                        if pred[j] == 0:\n                            pred[j] = 1\n                for j in range(i, len(gt)):\n                    if gt[j] == 0:\n                        break\n                    else:\n                        if pred[j] == 0:\n                            pred[j] = 1\n            elif gt[i] == 0:\n                anomaly_state = False\n            if anomaly_state:\n                pred[i] = 1\n\n        pred = np.array(pred)\n        gt = np.array(gt)\n\n        from sklearn.metrics import precision_recall_fscore_support\n        from sklearn.metrics import accuracy_score\n\n        accuracy = accuracy_score(gt, pred)\n        precision, recall, f_score, support = precision_recall_fscore_support(gt, pred, average='binary')\n        print(\n            \"Accuracy : {:0.4f}, Precision : {:0.4f}, Recall : {:0.4f}, F-score : {:0.4f} \".format(accuracy, precision,\n                                                                                                   recall, f_score))\n\n        # if self.data_path == 'UCR' or 'UCR_AUG':\n        #     import csv\n        #     with open('result_dc/' + self.dataset + '.csv', 'a+') as f:\n        #         writer = csv.writer(f)\n        #         writer.writerow(matrix)\n\n        eval_res['f1'] = f_score\n        eval_res['precision'] = precision\n        eval_res['recall'] = recall\n\n        return eval_res\n\n    def vali_uni(self, vali_loader):\n        self.model.eval()\n        loss_1 = []\n     
   loss_2 = []\n        for i, input_data in enumerate(vali_loader):\n            input = input_data.float().to(self.device)\n            series, prior = self.model(input)\n            series_loss = 0.0\n            prior_loss = 0.0\n            for u in range(len(prior)):\n                series_loss += (torch.mean(my_kl_loss(series[u], (\n                        prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                               self.win_size)).detach())) + torch.mean(\n                    my_kl_loss(\n                        (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                self.win_size)).detach(),\n                        series[u])))\n                prior_loss += (torch.mean(\n                    my_kl_loss((prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                       self.win_size)),\n                               series[u].detach())) + torch.mean(\n                    my_kl_loss(series[u].detach(),\n                               (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                       self.win_size)))))\n\n            series_loss = series_loss / len(prior)\n            prior_loss = prior_loss / len(prior)\n\n            loss_1.append((prior_loss - series_loss).item())\n\n        return np.average(loss_1), np.average(loss_2)\n\n    def train_uni(self):\n\n        time_now = time.time()\n        path = self.model_save_path\n        if not os.path.exists(path):\n            os.makedirs(path)\n        early_stopping = EarlyStopping(patience=5, verbose=True, 
dataset_name=self.dataset, index=self.index)\n        train_steps = len(self.train_loader)\n\n        for epoch in range(self.num_epochs):\n            iter_count = 0\n\n            epoch_time = time.time()\n            self.model.train()\n            # for i, data in enumerate(self.train_loader):\n            #     print(data)\n            #     break\n\n            for i, input_data in enumerate(self.train_loader):\n\n                self.optimizer.zero_grad()\n                iter_count += 1\n                input = input_data.float().to(self.device)\n\n\n                # print(\"input = \", type(input), input.shape)\n                series, prior = self.model(input)\n\n                series_loss = 0.0\n                prior_loss = 0.0\n\n                for u in range(len(prior)):\n                    series_loss += (torch.mean(my_kl_loss(series[u], (\n                            prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                   self.win_size)).detach())) + torch.mean(\n                        my_kl_loss((prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                           self.win_size)).detach(),\n                                   series[u])))\n                    prior_loss += (torch.mean(my_kl_loss(\n                        (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                self.win_size)),\n                        series[u].detach())) + torch.mean(\n                        my_kl_loss(series[u].detach(), (\n                                prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                     
                                  self.win_size)))))\n\n                series_loss = series_loss / len(prior)\n                prior_loss = prior_loss / len(prior)\n\n                loss = prior_loss - series_loss\n\n                if (i + 1) % 100 == 0:\n                    speed = (time.time() - time_now) / iter_count\n                    left_time = speed * ((self.num_epochs - epoch) * train_steps - i)\n                    print('\\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(speed, left_time))\n                    iter_count = 0\n                    time_now = time.time()\n\n                loss.backward()\n                self.optimizer.step()\n\n            vali_loss1, vali_loss2 = self.vali_uni(self.vali_loader)\n\n            print(\n                \"Epoch: {0}, Cost time: {1:.3f}s \".format(\n                    epoch + 1, time.time() - epoch_time))\n            early_stopping(vali_loss1, vali_loss2, self.model, path)\n            if early_stopping.early_stop:\n                break\n            adjust_learning_rate(self.optimizer, epoch + 1, self.lr)\n\n    def test_uni(self, all_train_data, all_test_data, all_test_labels, all_test_timestamps, delay, config):\n        self.model.load_state_dict(\n            torch.load(\n                os.path.join(str(self.model_save_path), str(self.dataset) + str(self.index) + '_checkpoint.pth')))\n        self.model.eval()\n        temperature = 50\n\n        # (1) stastic on the train set\n        attens_energy = []\n\n        for k in all_train_data:\n            train_data = all_train_data[k]\n\n            train_data = np.array(train_data)\n\n            # train_data =\n            train_data = np.expand_dims(train_data, axis=-1)\n            train_dataset = UniLoader_train(train_data, config.win_size, 1)\n\n            train_loader = DataLoader(dataset=train_dataset,\n                                      batch_size=config.batch_size,\n                                      shuffle=True,\n                  
                    num_workers=2,\n                                      drop_last=True)\n\n            for i, input_data in enumerate(train_loader):\n                input = input_data.float().to(self.device)\n                series, prior = self.model(input)\n                series_loss = 0.0\n                prior_loss = 0.0\n                for u in range(len(prior)):\n                    if u == 0:\n                        series_loss = my_kl_loss(series[u], (\n                                prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                       self.win_size)).detach()) * temperature\n                        prior_loss = my_kl_loss(\n                            (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                    self.win_size)),\n                            series[u].detach()) * temperature\n                    else:\n                        series_loss += my_kl_loss(series[u], (\n                                prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                       self.win_size)).detach()) * temperature\n                        prior_loss += my_kl_loss(\n                            (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                    self.win_size)),\n                            series[u].detach()) * temperature\n\n                metric = torch.softmax((-series_loss - prior_loss), dim=-1)\n                cri = metric.detach().cpu().numpy()\n                attens_energy.append(cri)\n\n        attens_energy = np.concatenate(attens_energy, 
axis=0).reshape(-1)\n        train_energy = np.array(attens_energy)\n\n        # (2) find the threshold\n        attens_energy = []\n        for k in all_train_data:\n            _test_labels = all_test_labels[k]\n            test_data = all_test_data[k]\n\n            test_data = np.array(test_data)\n\n            test_data = np.expand_dims(test_data, axis=-1)\n\n            test_dataset = UniLoader_test(test_data, _test_labels, config.win_size, 1)\n\n            test_loader = DataLoader(dataset=test_dataset,\n                                     batch_size=config.batch_size,\n                                     shuffle=True,\n                                     num_workers=2,\n                                     drop_last=True)\n\n            for i, (input_data, labels) in enumerate(test_loader):\n                input = input_data.float().to(self.device)\n                series, prior = self.model(input)\n                series_loss = 0.0\n                prior_loss = 0.0\n                for u in range(len(prior)):\n                    if u == 0:\n                        series_loss = my_kl_loss(series[u], (\n                                prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                       self.win_size)).detach()) * temperature\n                        prior_loss = my_kl_loss(\n                            (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                    self.win_size)),\n                            series[u].detach()) * temperature\n                    else:\n                        series_loss += my_kl_loss(series[u], (\n                                prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                
                                       self.win_size)).detach()) * temperature\n                        prior_loss += my_kl_loss(\n                            (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                    self.win_size)),\n                            series[u].detach()) * temperature\n\n                metric = torch.softmax((-series_loss - prior_loss), dim=-1)\n                cri = metric.detach().cpu().numpy()\n                attens_energy.append(cri)\n\n        attens_energy = np.concatenate(attens_energy, axis=0).reshape(-1)\n        test_energy = np.array(attens_energy)\n        combined_energy = np.concatenate([train_energy, test_energy], axis=0)\n        thresh = np.percentile(combined_energy, 100 - self.anormly_ratio)\n        print(\"Threshold :\", thresh)\n\n        # (3) evaluation on the test set\n        test_labels = []\n        attens_energy = []\n        for k in all_train_data:\n            _test_labels = all_test_labels[k]\n            test_data = all_test_data[k]\n\n            test_data = np.array(test_data)\n\n            test_data = np.expand_dims(test_data, axis=-1)\n\n            test_dataset = UniLoader_test(test_data, _test_labels, config.win_size, 1)\n\n            test_loader = DataLoader(dataset=test_dataset,\n                                     batch_size=config.batch_size,\n                                     shuffle=True,\n                                     num_workers=2,\n                                     drop_last=True)\n            for i, (input_data, labels) in enumerate(test_loader):\n                input = input_data.float().to(self.device)\n                series, prior = self.model(input)\n                series_loss = 0.0\n                prior_loss = 0.0\n                for u in range(len(prior)):\n                    if u == 0:\n                        series_loss = 
my_kl_loss(series[u], (\n                                prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                       self.win_size)).detach()) * temperature\n                        prior_loss = my_kl_loss(\n                            (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                    self.win_size)),\n                            series[u].detach()) * temperature\n                    else:\n                        series_loss += my_kl_loss(series[u], (\n                                prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                       self.win_size)).detach()) * temperature\n                        prior_loss += my_kl_loss(\n                            (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                    self.win_size)),\n                            series[u].detach()) * temperature\n                metric = torch.softmax((-series_loss - prior_loss), dim=-1)\n                cri = metric.detach().cpu().numpy()\n                attens_energy.append(cri)\n                test_labels.append(labels)\n\n        attens_energy = np.concatenate(attens_energy, axis=0).reshape(-1)\n        test_labels = np.concatenate(test_labels, axis=0).reshape(-1)\n        test_energy = np.array(attens_energy)\n        test_labels = np.array(test_labels)\n\n        pred = (test_energy > thresh).astype(int)\n        gt = test_labels.astype(int)\n\n        # labels = np.asarray(labels_log, np.int64)[0]\n\n        # print(\"test_energy.shape = \", test_energy.shape, 
test_labels.shape)\n        # print(\"test_energy.shape = \", test_energy[:10])\n        # print(\"test_labels.shape = \", test_labels[:10])\n\n        # results_f1_pa_k_10 = evaluate.evaluate(test_energy, test_labels, k=10)\n        # results_f1_pa_k_50 = evaluate.evaluate(test_energy, test_labels, k=50)\n        # results_f1_pa_k_90 = evaluate.evaluate(test_energy, test_labels, k=90)\n\n        eval_res = {\n            'f1': None,\n            'precision': None,\n            'recall': None,\n            \"Affiliation precision\": None,\n            \"Affiliation recall\": None,\n            \"R_AUC_ROC\": None,\n            \"R_AUC_PR\": None,\n            \"VUS_ROC\": None,\n            \"VUS_PR\": None,\n            # 'f1_pa_10': results_f1_pa_k_10['best_f1_w_pa'],\n            # 'f1_pa_50': results_f1_pa_k_50['best_f1_w_pa'],\n            # 'f1_pa_90': results_f1_pa_k_90['best_f1_w_pa'],\n        }\n\n        # matrix = [self.index]\n\n        min_len = min(min(pred.shape[0], gt.shape[0]), test_energy.shape[0])\n\n        scores_simple = combine_all_evaluation_scores(pred[:min_len], gt[:min_len], test_energy[:min_len])\n        for key, value in scores_simple.items():\n            # matrix.append(value)\n            if key == 'Affiliation precision':\n                eval_res[\"Affiliation precision\"] = value\n            if key == 'Affiliation recall':\n                eval_res[\"Affiliation recall\"] = value\n            if key == 'R_AUC_ROC':\n                eval_res[\"R_AUC_ROC\"] = value\n            if key == 'R_AUC_PR':\n                eval_res[\"R_AUC_PR\"] = value\n            if key == 'VUS_ROC':\n                eval_res[\"VUS_ROC\"] = value\n            if key == 'VUS_PR':\n                eval_res[\"VUS_PR\"] = value\n\n            print('{0:21} : {1:0.4f}'.format(key, value))\n\n        anomaly_state = False\n        for i in range(len(gt)):\n            if gt[i] == 1 and pred[i] == 1 and not anomaly_state:\n                anomaly_state = 
True\n                for j in range(i, 0, -1):\n                    if gt[j] == 0:\n                        break\n                    else:\n                        if pred[j] == 0:\n                            pred[j] = 1\n                for j in range(i, len(gt)):\n                    if gt[j] == 0:\n                        break\n                    else:\n                        if pred[j] == 0:\n                            pred[j] = 1\n            elif gt[i] == 0:\n                anomaly_state = False\n            if anomaly_state:\n                pred[i] = 1\n\n        pred = np.array(pred)\n        gt = np.array(gt)\n\n        from sklearn.metrics import precision_recall_fscore_support\n        from sklearn.metrics import accuracy_score\n\n        accuracy = accuracy_score(gt, pred)\n        precision, recall, f_score, support = precision_recall_fscore_support(gt[:min_len], pred[:min_len], average='binary')\n        print(\n            \"Accuracy : {:0.4f}, Precision : {:0.4f}, Recall : {:0.4f}, F-score : {:0.4f} \".format(accuracy, precision,\n                                                                                                   recall, f_score))\n\n        # if self.data_path == 'UCR' or 'UCR_AUG':\n        #     import csv\n        #     with open('result_dc/' + self.dataset + '.csv', 'a+') as f:\n        #         writer = csv.writer(f)\n        #         writer.writerow(matrix)\n\n        eval_res['f1'] = f_score\n        eval_res['precision'] = precision\n        eval_res['recall'] = recall\n\n        return eval_res\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/donut.py",
    "content": "import torch\nimport torch.nn.functional as F\nfrom torch.utils.data import TensorDataset, DataLoader\nimport numpy as np\nfrom models.donut_model import DONUT_Model\nfrom utils import split_with_nan, centerize_vary_length_series\nimport math\nimport time\nfrom tasks.anomaly_detection import eval_ad_result, np_shift\nimport bottleneck as bn\nfrom sklearn.metrics import f1_score, precision_score, recall_score\nfrom other_anomaly_baselines.metrics.affiliation.metrics import pr_from_events\nfrom other_anomaly_baselines.metrics.vus.metrics import get_range_vus_roc\nfrom other_anomaly_baselines.metrics.affiliation.generics import convert_vector_to_events\nfrom tadpak import evaluate\n\n\ndef adjustment(gt, pred):\n    anomaly_state = False\n    for i in range(len(gt)):\n        if gt[i] == 1 and pred[i] == 1 and not anomaly_state:\n            anomaly_state = True\n            for j in range(i, 0, -1):\n                if gt[j] == 0:\n                    break\n                else:\n                    if pred[j] == 0:\n                        pred[j] = 1\n            for j in range(i, len(gt)):\n                if gt[j] == 0:\n                    break\n                else:\n                    if pred[j] == 0:\n                        pred[j] = 1\n        elif gt[i] == 0:\n            anomaly_state = False\n        if anomaly_state:\n            pred[i] = 1\n    return gt, pred\n\n\nclass DONUT:\n    \n    def __init__(\n        self,\n        input_dims,\n        latent_dim=100,\n        hidden_dim=3,\n        device='cuda',\n        lr=0.001,\n        batch_size=8,\n        z_kld_weight=0.1,\n        x_kld_weight=0.1,\n        max_train_length=None,\n        after_iter_callback=None,\n        after_epoch_callback=None\n    ):\n        \n        super().__init__()\n        self.device = device\n        self.lr = lr\n        self.batch_size = batch_size\n        self.z_kld_weight = z_kld_weight\n        self.x_kld_weight = x_kld_weight\n        
self.max_train_length = max_train_length\n        self.input_dims = input_dims\n       \n        self.net = DONUT_Model(in_channel=input_dims, latent_dim=latent_dim, hidden_dim=hidden_dim).to(self.device)\n        \n        self.after_iter_callback = after_iter_callback\n        self.after_epoch_callback = after_epoch_callback\n        \n        self.n_epochs = 0\n        self.n_iters = 0\n    \n    def train(self, train_data, n_epochs=None, n_iters=None, verbose=False):\n        ''' \n        Args:\n            train_data (numpy.ndarray): The training data. It should have a shape of (n_instance, n_timestamps, n_features). All missing data should be set to NaN.\n            n_epochs (Union[int, NoneType]): The number of epochs. When this reaches, the training stops.\n            n_iters (Union[int, NoneType]): The number of iterations. When this reaches, the training stops. If both n_epochs and n_iters are not specified, a default setting would be used that sets n_iters to 200 for a dataset with size <= 100000, 600 otherwise.\n            verbose (bool): Whether to print the training loss after each epoch.\n            \n        Returns:\n            loss_log: a list containing the training losses on each epoch.\n        '''\n        assert train_data.ndim == 3\n        \n        if n_iters is None and n_epochs is None:\n            n_iters = 200 if train_data.size <= 100000 else 600  # default param for n_iters\n        \n        if self.max_train_length is not None:\n            sections = train_data.shape[1] // self.max_train_length\n            if sections >= 2:\n                train_data = np.concatenate(split_with_nan(train_data, sections, axis=1), axis=0)\n                # train_data: (n_instance*sections, max_train_length, n_features)\n\n        temporal_missing = np.isnan(train_data).all(axis=-1).any(axis=0) # (max_train_length)\n        if temporal_missing[0] or temporal_missing[-1]: # whether the head or tail exists nan\n            train_data = 
centerize_vary_length_series(train_data)\n                \n        train_data = train_data[~np.isnan(train_data).all(axis=2).all(axis=1)] \n        # delete the sequence (max_train_length, n_features) contains only nan\n\n        for i in range(train_data.shape[0]):\n            train_data[i][np.isnan(train_data[i])] = np.nanmean(train_data[i])\n        \n        train_dataset = TensorDataset(torch.from_numpy(train_data).to(torch.float))\n        train_loader = DataLoader(train_dataset, batch_size=min(self.batch_size, len(train_dataset)), shuffle=True, drop_last=True)\n        \n        optimizer = torch.optim.AdamW(self.net.parameters(), lr=self.lr)\n        \n        loss_log = []\n        \n        while True:\n            if n_epochs is not None and self.n_epochs >= n_epochs:\n                break\n            \n            cum_loss = 0\n            n_epoch_iters = 0\n            \n            interrupted = False\n            for batch in train_loader:\n                if n_iters is not None and self.n_iters >= n_iters:\n                    interrupted = True\n                    break\n                \n                x = batch[0]  #(batch_size, n_timestamps, n_features)\n                # print(\"#####################\")\n                # raise Exception('my personal exception!')\n\n                if self.max_train_length is not None and x.size(1) > self.max_train_length:\n                    window_offset = np.random.randint(x.size(1) - self.max_train_length + 1)\n                    x = x[:, window_offset : window_offset + self.max_train_length]\n                x = x.to(self.device)\n                \n                optimizer.zero_grad()\n                \n                outputs, z_mu, z_log_var, x_mu, x_log_var = self.net(x) \n                loss = self.net.loss_function(x, outputs, z_mu, z_log_var, x_mu, x_log_var, self.z_kld_weight, self.x_kld_weight)\n                \n                loss.backward()\n                optimizer.step()\n          
          \n                cum_loss += loss.item()\n                n_epoch_iters += 1\n                \n                self.n_iters += 1\n                \n                if self.after_iter_callback is not None:\n                    self.after_iter_callback(self, loss.item())\n            \n            if interrupted:\n                break\n            \n            cum_loss /= n_epoch_iters\n            loss_log.append(cum_loss)\n            if verbose:\n                print(f\"Epoch #{self.n_epochs}: loss={cum_loss}\")\n            self.n_epochs += 1\n            \n            if self.after_epoch_callback is not None:\n                self.after_epoch_callback(self, cum_loss)\n            \n        return loss_log\n    \n\n    def anomaly_score(self, model, test_data, is_multi=False):\n        if is_multi:\n            test_data = torch.from_numpy(np.float32(test_data.reshape(1, -1, self.input_dims))).to(self.device)\n        else:\n            test_data = torch.from_numpy(np.float32(test_data.reshape(1, -1, 1))).to(self.device)\n        # test_data = torch.from_numpy(np.float32(test_data.reshape(1, -1, 1))).to(self.device)\n\n        if self.max_train_length is not None and test_data.size(1) > self.max_train_length:\n            window_offset = np.random.randint(test_data.size(1) - self.max_train_length + 1)\n            test_data = test_data[:, window_offset: window_offset + self.max_train_length]\n\n        # 设置批次大小\n        batch_size = 2\n\n        # 创建 DataLoader\n        test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=False)\n\n        self.net.eval()\n        with torch.no_grad():\n            # 初始化保存输出的列表\n            outputs_list = []\n            # z_mu_list = []\n            # z_log_var_list = []\n            # x_mu_list = []\n            # x_log_var_list = []\n            for input_data in test_loader:\n                input_data = input_data[0]  # 从 TensorDataset 中提取数据\n\n\n                # x = x.to(self.device)\n\n        
        print(\"input_data.shape = \", input_data.shape)\n                batch_outputs, batch_z_mu, batch_z_log_var, batch_x_mu, batch_x_log_var =  self.net(input_data)\n\n                # 保存每个批次的结果\n                outputs_list.append(batch_outputs)\n                # z_mu_list.append(batch_z_mu)\n                # z_log_var_list.append(batch_z_log_var)\n                # x_mu_list.append(batch_x_mu)\n                # x_log_var_list.append(batch_x_log_var)\n\n            # 将所有批次结果整合\n            outputs = torch.cat(outputs_list, dim=0)\n            # z_mu = torch.cat(z_mu_list, dim=0)\n            # z_log_var = torch.cat(z_log_var_list, dim=0)\n            # x_mu = torch.cat(x_mu_list, dim=0)\n            # x_log_var = torch.cat(x_log_var_list, dim=0)\n            # print(\"test_data.shape = \", test_data.shape)\n            # print(\"self.net = \", self.net)\n            # outputs, z_mu, z_log_var, x_mu, x_log_var = self.net(test_data)\n\n            # rec_error = torch.sum(torch.abs(outputs - test_data), dim=-1)\n            rec_error = torch.sum(torch.square(outputs - test_data), dim=-1)\n            rec_error = torch.flatten(rec_error)\n\n        return rec_error\n    \n    def evaluate(self, model, all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay, is_multi=False, ucr_index=None):\n        t = time.time()\n\n        res_log = []\n        labels_log = []\n        timestamps_log = []\n        res_log_socres = []\n        if is_multi:\n            train_data = all_train_data\n\n            test_data = all_test_data\n            test_labels = all_test_labels\n\n            print(\"train_data.shape = \", train_data.shape, \", test_data.shape = \", test_data.shape)\n\n            train_err = self.anomaly_score(model, train_data, is_multi=is_multi).detach().cpu().numpy()\n            test_err = self.anomaly_score(model, test_data, is_multi=is_multi).detach().cpu().numpy()\n\n            ma = 
np_shift(bn.move_mean(np.concatenate([train_err, test_err]), 21), 1)\n            train_err_adj = (train_err - ma[:len(train_err)]) / ma[:len(train_err)]\n            test_err_adj = (test_err - ma[len(train_err):]) / ma[len(train_err):]\n            train_err_adj = train_err_adj[22:]\n\n            thr = np.mean(train_err_adj) + 4 * np.std(train_err_adj)\n            test_res = (test_err_adj > thr) * 1\n            res_log_socres.append(test_err_adj)\n\n            for i in range(len(test_res)):\n                if i >= delay and test_res[i - delay:i].sum() >= 1:\n                    test_res[i] = 0\n\n            res_log.append(test_res)\n            labels_log.append(test_labels)\n\n        else:\n            for k in all_test_data:\n                train_data = all_train_data[k]\n                train_labels = all_train_labels[k]\n                train_timestamps = all_train_timestamps[k]\n\n                test_data = all_test_data[k]\n                test_labels = all_test_labels[k]\n                test_timestamps = all_test_timestamps[k]\n\n                train_err = self.anomaly_score(model, train_data).detach().cpu().numpy()\n                test_err = self.anomaly_score(model, test_data).detach().cpu().numpy()\n\n                ma = np_shift(bn.move_mean(np.concatenate([train_err, test_err]), 21), 1)\n                train_err_adj = (train_err - ma[:len(train_err)]) / ma[:len(train_err)]\n                test_err_adj = (test_err - ma[len(train_err):]) / ma[len(train_err):]\n                train_err_adj = train_err_adj[22:]\n\n                thr = np.mean(train_err_adj) + 4 * np.std(train_err_adj)\n                test_res = (test_err_adj > thr) * 1\n                res_log_socres.append(test_err_adj)\n\n                for i in range(len(test_res)):\n                    if i >= delay and test_res[i-delay:i].sum() >= 1:\n                        test_res[i] = 0\n\n                res_log.append(test_res)\n                labels_log.append(test_labels)\n 
               timestamps_log.append(test_timestamps)\n        t = time.time() - t\n\n        if is_multi:\n            if ucr_index == 79 or ucr_index == 108 or ucr_index == 187 or ucr_index == 203:\n                labels = np.asarray(labels_log, np.int64)[0]\n                pred = np.asarray(res_log, np.int64)[0]\n\n                labels, pred = adjustment(labels, pred)\n\n                eval_res = {\n                    'f1': f1_score(labels, pred),\n                    'precision': precision_score(labels, pred),\n                    'recall': recall_score(labels, pred),\n                    \"Affiliation precision\": None,\n                    \"Affiliation recall\": None,\n                    \"R_AUC_ROC\": None,\n                    \"R_AUC_PR\": None,\n                    \"VUS_ROC\": None,\n                    \"VUS_PR\": None,\n                    'f1_pa_10': None,\n                    'f1_pa_50': None,\n                    'f1_pa_90': None,\n                }\n            else:\n\n\n                labels = np.asarray(labels_log, np.int64)[0]\n                pred = np.asarray(res_log, np.int64)[0]\n                # print(\"labels.shape = \", labels.shape, labels[:5])\n                # print(\"pred.shape = \", pred.shape, pred[:5])\n\n                events_pred = convert_vector_to_events(pred)\n                events_gt = convert_vector_to_events(labels)\n\n                Trange = (0, len(labels))\n                affiliation = pr_from_events(events_pred, events_gt, Trange)\n                vus_results = get_range_vus_roc(labels, pred, 100)  # default slidingWindow = 100\n\n                pred_scores = np.asarray(res_log_socres, np.float64)[0]\n                results_f1_pa_k_10 = evaluate.evaluate(pred_scores, labels, k=10)\n                results_f1_pa_k_50 = evaluate.evaluate(pred_scores, labels, k=50)\n                results_f1_pa_k_90 = evaluate.evaluate(pred_scores, labels, k=90)\n\n                labels, pred = adjustment(labels, 
pred)\n\n                eval_res = {\n                    'f1': f1_score(labels, pred),\n                    'precision': precision_score(labels, pred),\n                    'recall': recall_score(labels, pred),\n                    \"Affiliation precision\": affiliation['precision'],\n                    \"Affiliation recall\": affiliation['recall'],\n                    \"R_AUC_ROC\": vus_results[\"R_AUC_ROC\"],\n                    \"R_AUC_PR\": vus_results[\"R_AUC_PR\"],\n                    \"VUS_ROC\": vus_results[\"VUS_ROC\"],\n                    \"VUS_PR\": vus_results[\"VUS_PR\"],\n                    'f1_pa_10': results_f1_pa_k_10['best_f1_w_pa'],\n                    'f1_pa_50': results_f1_pa_k_50['best_f1_w_pa'],\n                    'f1_pa_90': results_f1_pa_k_90['best_f1_w_pa'],\n                }\n        else:\n\n            eval_res = eval_ad_result(res_log, labels_log, timestamps_log, delay, pred_scores=res_log_socres)\n        eval_res['infer_time'] = t\n        return res_log, eval_res\n\n    def save(self, fn):\n        ''' Save the model to a file.\n        \n        Args:\n            fn (str): filename.\n        '''\n        torch.save(self.net.state_dict(), fn)\n    \n    def load(self, fn):\n        ''' Load the model from a file.\n        \n        Args:\n            fn (str): filename.\n        '''\n        state_dict = torch.load(fn, map_location=self.device)\n        self.net.load_state_dict(state_dict)"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/exp_anomaly_detection.py",
    "content": "from sklearn.metrics import precision_recall_fscore_support\nfrom sklearn.metrics import accuracy_score\nimport torch.multiprocessing\nfrom other_anomaly_baselines.models import TimesNet\nfrom other_anomaly_baselines.models import GPT4TS\n\ntorch.multiprocessing.set_sharing_strategy('file_system')\nimport torch\nimport torch.nn as nn\nfrom torch import optim\nimport os\nimport time\nimport warnings\nimport numpy as np\nimport math\nfrom other_anomaly_baselines.metrics.metrics import *\nimport warnings\nfrom tadpak import evaluate\n\nfrom torch.utils.data import TensorDataset, DataLoader\n\n\nwarnings.filterwarnings('ignore')\n\n\n\nclass UniLoader_train(object):\n    def __init__(self, data_set, win_size, step, mode=\"train\"):\n        self.mode = mode\n        self.step = step\n        self.win_size = win_size\n\n        self.train = data_set\n\n\n    def __len__(self):\n        \"\"\"\n        Number of images in the object dataset.\n        \"\"\"\n\n        return (self.train.shape[0] - self.win_size) // self.step + 1\n\n\n    def __getitem__(self, index):\n        index = index * self.step\n\n        return np.float32(self.train[index:index + self.win_size])\n\n\nclass UniLoader_test(object):\n    def __init__(self, data_set, label_set, win_size, step, mode=\"train\"):\n        self.mode = mode\n        self.step = step\n        self.win_size = win_size\n\n        self.train = data_set\n        self.train_labels = label_set\n\n\n    def __len__(self):\n        \"\"\"\n        Number of images in the object dataset.\n        \"\"\"\n\n        return (self.train.shape[0] - self.win_size) // self.step + 1\n\n\n    def __getitem__(self, index):\n        index = index * self.step\n\n        return np.float32(self.train[index:index + self.win_size]), np.float32(self.train_labels[0:self.win_size])\n\n\n\n\ndef adjustment(gt, pred):\n    anomaly_state = False\n    for i in range(len(gt)):\n        if gt[i] == 1 and pred[i] == 1 and not 
anomaly_state:\n            anomaly_state = True\n            for j in range(i, 0, -1):\n                if gt[j] == 0:\n                    break\n                else:\n                    if pred[j] == 0:\n                        pred[j] = 1\n            for j in range(i, len(gt)):\n                if gt[j] == 0:\n                    break\n                else:\n                    if pred[j] == 0:\n                        pred[j] = 1\n        elif gt[i] == 0:\n            anomaly_state = False\n        if anomaly_state:\n            pred[i] = 1\n    return gt, pred\n\n\ndef adjust_learning_rate(optimizer, epoch, args):\n    # lr = args.learning_rate * (0.2 ** (epoch // 2))\n    if args.lradj == 'type1':\n        lr_adjust = {epoch: args.learning_rate * (0.5 ** ((epoch - 1) // 1))}\n    elif args.lradj == 'type2':\n        lr_adjust = {\n            2: 5e-5, 4: 1e-5, 6: 5e-6, 8: 1e-6,\n            10: 5e-7, 15: 1e-7, 20: 5e-8\n        }\n    elif args.lradj == \"cosine\":\n        lr_adjust = {epoch: args.learning_rate /2 * (1 + math.cos(epoch / args.train_epochs * math.pi))}\n    if epoch in lr_adjust.keys():\n        lr = lr_adjust[epoch]\n        for param_group in optimizer.param_groups:\n            param_group['lr'] = lr\n        print('Updating learning rate to {}'.format(lr))\n\n\nclass EarlyStopping:\n    def __init__(self, patience=7, verbose=False, delta=0):\n        self.patience = patience\n        self.verbose = verbose\n        self.counter = 0\n        self.best_score = None\n        self.early_stop = False\n        self.val_loss_min = np.Inf\n        self.delta = delta\n\n    def __call__(self, val_loss, model, path):\n        score = -val_loss\n        if self.best_score is None:\n            self.best_score = score\n            self.save_checkpoint(val_loss, model, path)\n        elif score < self.best_score + self.delta:\n            self.counter += 1\n            print(f'EarlyStopping counter: {self.counter} out of {self.patience}')\n       
     if self.counter >= self.patience:\n                self.early_stop = True\n        else:\n            self.best_score = score\n            self.save_checkpoint(val_loss, model, path)\n            self.counter = 0\n\n    def save_checkpoint(self, val_loss, model, path):\n        if self.verbose:\n            print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}).  Saving model ...')\n        torch.save(model.state_dict(), path + '/' + 'checkpoint.pth')\n        self.val_loss_min = val_loss\n\nclass Exp_Basic(object):\n    def __init__(self, args):\n        self.args = args\n        self.model_dict = {\n            'TimesNet': TimesNet,\n            'GPT4TS': GPT4TS,\n        }\n        self.device = self._acquire_device()\n        self.model = self._build_model().to(self.device)\n\n    def _build_model(self):\n        raise NotImplementedError\n        return None\n\n    def _acquire_device(self):\n        if self.args.use_gpu:\n            os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(\n                self.args.gpu) if not self.args.use_multi_gpu else self.args.devices\n            device = torch.device('cuda:{}'.format(self.args.gpu))\n            print('Use GPU: cuda:{}'.format(self.args.gpu))\n        else:\n            device = torch.device('cpu')\n            print('Use CPU')\n        return device\n\n    def _get_data(self):\n        pass\n\n    def vali(self):\n        pass\n\n    def train(self):\n        pass\n\n    def test(self):\n        pass\n\n\nclass Exp_Anomaly_Detection(Exp_Basic):\n    def __init__(self, args, train_set, train_loader, val_set, val_loader, test_set, test_loader):\n        super(Exp_Anomaly_Detection, self).__init__(args)\n        self.train_set = train_set\n        self.train_loader = train_loader\n        self.val_set = val_set\n        self.val_loader = val_loader\n        self.test_set = test_set\n        self.test_loader = test_loader\n\n    def _build_model(self):\n        model = 
self.model_dict[self.args.model].Model(self.args).float()\n\n        if self.args.use_multi_gpu and self.args.use_gpu:\n            model = nn.DataParallel(model, device_ids=self.args.device_ids)\n        return model\n\n    def _get_data(self, flag):\n        # data_set, data_loader = data_provider(self.args, flag)\n        if flag == 'train':\n            return self.train_set, self.train_loader\n\n        if flag == 'val':\n            return self.val_set, self.val_loader\n\n        if flag == 'test':\n            return self.test_set, self.test_loader\n\n        # return self.data_set, self.data_loader\n\n    def _select_optimizer(self):\n        model_optim = optim.Adam(self.model.parameters(), lr=self.args.learning_rate)\n        return model_optim\n\n    def _select_criterion(self):\n        criterion = nn.MSELoss()\n        return criterion\n\n    def vali(self, vali_data, vali_loader, criterion):\n        total_loss = []\n        self.model.eval()\n        with torch.no_grad():\n            for i, (batch_x, _) in enumerate(vali_loader):\n                batch_x = batch_x.float().to(self.device)\n\n                outputs = self.model(batch_x, None, None, None)\n\n                f_dim = -1 if self.args.features == 'MS' else 0\n                outputs = outputs[:, :, f_dim:]\n                pred = outputs.detach().cpu()\n                true = batch_x.detach().cpu()\n\n                loss = criterion(pred, true)\n                total_loss.append(loss)\n        total_loss = np.average(total_loss)\n        self.model.train()\n        return total_loss\n\n    def vali_uni(self, vali_data, vali_loader, criterion):\n        total_loss = []\n        self.model.eval()\n        with torch.no_grad():\n            for i, batch_x in enumerate(vali_loader):\n                batch_x = batch_x.float().to(self.device)\n\n                outputs = self.model(batch_x, None, None, None)\n\n                f_dim = -1 if self.args.features == 'MS' else 0\n                
outputs = outputs[:, :, f_dim:]\n                pred = outputs.detach().cpu()\n                true = batch_x.detach().cpu()\n\n                loss = criterion(pred, true)\n                total_loss.append(loss)\n        total_loss = np.average(total_loss)\n        self.model.train()\n        return total_loss\n\n    def train(self, setting):\n        train_data, train_loader = self._get_data(flag='train')\n        vali_data, vali_loader = self._get_data(flag='val')\n        test_data, test_loader = self._get_data(flag='test')\n\n        path = os.path.join(self.args.checkpoints, setting)\n        if not os.path.exists(path):\n            os.makedirs(path)\n\n        time_now = time.time()\n\n        train_steps = len(train_loader)\n        early_stopping = EarlyStopping(patience=self.args.patience, verbose=True)\n\n        model_optim = self._select_optimizer()\n        criterion = self._select_criterion()\n\n        for epoch in range(self.args.train_epochs):\n            iter_count = 0\n            train_loss = []\n\n            self.model.train()\n            epoch_time = time.time()\n            for i, (batch_x, batch_y) in enumerate(train_loader):\n                iter_count += 1\n                model_optim.zero_grad()\n\n                batch_x = batch_x.float().to(self.device)\n\n                outputs = self.model(batch_x, None, None, None)\n\n                f_dim = -1 if self.args.features == 'MS' else 0\n                outputs = outputs[:, :, f_dim:]\n                loss = criterion(outputs, batch_x)\n                train_loss.append(loss.item())\n\n                # print(\"loss = \", loss)\n                # print(\"batch_x.shape = \", batch_x.shape, \", outputs.shape = \", outputs.shape)\n\n                if (i + 1) % 100 == 0:\n                    print(\"\\titers: {0}, epoch: {1} | loss: {2:.7f}\".format(i + 1, epoch + 1, loss.item()))\n                    speed = (time.time() - time_now) / iter_count\n                    left_time = speed 
* ((self.args.train_epochs - epoch) * train_steps - i)\n                    print('\\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(speed, left_time))\n                    iter_count = 0\n                    time_now = time.time()\n\n                loss.backward()\n                model_optim.step()\n\n            print(\"Epoch: {} cost time: {}\".format(epoch + 1, time.time() - epoch_time))\n            train_loss = np.average(train_loss)\n            vali_loss = self.vali(vali_data, vali_loader, criterion)\n            test_loss = self.vali(test_data, test_loader, criterion)\n\n            print(\"Epoch: {0}, Steps: {1} | Train Loss: {2:.7f} Vali Loss: {3:.7f} Test Loss: {4:.7f}\".format(\n                epoch + 1, train_steps, train_loss, vali_loss, test_loss))\n            early_stopping(vali_loss, self.model, path)\n            if early_stopping.early_stop:\n                print(\"Early stopping\")\n                break\n            adjust_learning_rate(model_optim, epoch + 1, self.args)\n\n        best_model_path = path + '/' + 'checkpoint.pth'\n        self.model.load_state_dict(torch.load(best_model_path))\n\n        return self.model\n\n    def train_uni(self, setting):\n        train_data, train_loader = self._get_data(flag='train')\n        vali_data, vali_loader = self._get_data(flag='val')\n        test_data, test_loader = self._get_data(flag='test')\n\n        path = os.path.join(self.args.checkpoints, setting)\n        if not os.path.exists(path):\n            os.makedirs(path)\n\n        time_now = time.time()\n\n        train_steps = len(train_loader)\n        early_stopping = EarlyStopping(patience=self.args.patience, verbose=True)\n\n        model_optim = self._select_optimizer()\n        criterion = self._select_criterion()\n\n        for epoch in range(self.args.train_epochs):\n            iter_count = 0\n            train_loss = []\n\n            self.model.train()\n            epoch_time = time.time()\n            for i, batch_x in 
enumerate(train_loader):\n                iter_count += 1\n                model_optim.zero_grad()\n\n                batch_x = batch_x.float().to(self.device)\n\n                # print(\"batch_x.shape = \", batch_x.shape, \", batch_x[:5] = \", batch_x[:5])\n\n                outputs = self.model(batch_x, None, None, None)\n\n                f_dim = -1 if self.args.features == 'MS' else 0\n                outputs = outputs[:, :, f_dim:]\n                loss = criterion(outputs, batch_x)\n                train_loss.append(loss.item())\n\n                # print(\"loss = \", loss)\n                # print(\"batch_x.shape = \", batch_x.shape, \", outputs.shape = \", outputs.shape)\n\n                if (i + 1) % 100 == 0:\n                    print(\"\\titers: {0}, epoch: {1} | loss: {2:.7f}\".format(i + 1, epoch + 1, loss.item()))\n                    speed = (time.time() - time_now) / iter_count\n                    left_time = speed * ((self.args.train_epochs - epoch) * train_steps - i)\n                    print('\\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(speed, left_time))\n                    iter_count = 0\n                    time_now = time.time()\n\n                loss.backward()\n                model_optim.step()\n\n            print(\"Epoch: {} cost time: {}\".format(epoch + 1, time.time() - epoch_time))\n            train_loss = np.average(train_loss)\n            vali_loss = self.vali_uni(vali_data, vali_loader, criterion)\n            # test_loss = self.vali(test_data, test_loader, criterion)\n\n            print(\"Epoch: {0}, Steps: {1} | Train Loss: {2:.7f}\".format(\n                epoch + 1, train_steps, train_loss))\n            early_stopping(vali_loss, self.model, path)\n            if early_stopping.early_stop:\n                print(\"Early stopping\")\n                break\n            adjust_learning_rate(model_optim, epoch + 1, self.args)\n\n        best_model_path = path + '/' + 'checkpoint.pth'\n        
self.model.load_state_dict(torch.load(best_model_path))\n\n        return self.model\n\n    def test(self, setting, test=0, dataset=None, ucr_index=None):\n        test_data, test_loader = self._get_data(flag='test')\n        train_data, train_loader = self._get_data(flag='train')\n        if test:\n            print('loading model')\n            self.model.load_state_dict(torch.load(os.path.join('./checkpoints/' + setting, 'checkpoint.pth')))\n\n        attens_energy = []\n        folder_path = './test_results/' + setting + '/'\n        if not os.path.exists(folder_path):\n            os.makedirs(folder_path)\n\n        self.model.eval()\n        self.anomaly_criterion = nn.MSELoss(reduce=False)\n\n        # (1) stastic on the train set\n        with torch.no_grad():\n            for i, (batch_x, batch_y) in enumerate(train_loader):\n                batch_x = batch_x.float().to(self.device)\n                # reconstruction\n                outputs = self.model(batch_x, None, None, None)\n                # criterion\n                score = torch.mean(self.anomaly_criterion(batch_x, outputs), dim=-1)\n                score = score.detach().cpu().numpy()\n                attens_energy.append(score)\n\n        attens_energy = np.concatenate(attens_energy, axis=0).reshape(-1)\n        train_energy = np.array(attens_energy)\n\n        # (2) find the threshold\n        attens_energy = []\n        test_labels = []\n        for i, (batch_x, batch_y) in enumerate(test_loader):\n            batch_x = batch_x.float().to(self.device)\n            # reconstruction\n            outputs = self.model(batch_x, None, None, None)\n            # criterion\n            score = torch.mean(self.anomaly_criterion(batch_x, outputs), dim=-1)\n            score = score.detach().cpu().numpy()\n            attens_energy.append(score)\n            test_labels.append(batch_y)\n\n        attens_energy = np.concatenate(attens_energy, axis=0).reshape(-1)\n        test_energy = 
np.array(attens_energy)\n        combined_energy = np.concatenate([train_energy, test_energy], axis=0)\n        threshold = np.percentile(combined_energy, 100 - self.args.anomaly_ratio)\n        print(\"Threshold :\", threshold)\n\n        # (3) evaluation on the test set\n        pred = (test_energy > threshold).astype(int)\n        test_labels = np.concatenate(test_labels, axis=0).reshape(-1)\n        test_labels = np.array(test_labels)\n        gt = test_labels.astype(int)\n\n        print(\"pred:   \", pred.shape)\n        print(\"gt:     \", gt.shape)\n\n        # if dataset == 'UCR':\n        eval_res = {\n            'f1': None,\n            'precision': None,\n            'recall': None,\n            \"Affiliation precision\": None,\n            \"Affiliation recall\": None,\n            \"R_AUC_ROC\": None,\n            \"R_AUC_PR\": None,\n            \"VUS_ROC\": None,\n            \"VUS_PR\": None,\n            'f1_pa_10': None,\n            'f1_pa_50': None,\n            'f1_pa_90': None,\n        }\n\n        # else:\n        #\n        #     results_f1_pa_k_10 = evaluate.evaluate(test_energy, test_labels, k=10)\n        #     results_f1_pa_k_50 = evaluate.evaluate(test_energy, test_labels, k=50)\n        #     results_f1_pa_k_90 = evaluate.evaluate(test_energy, test_labels, k=90)\n        #\n        #     eval_res = {\n        #         'f1': None,\n        #         'precision': None,\n        #         'recall': None,\n        #         \"Affiliation precision\": None,\n        #         \"Affiliation recall\": None,\n        #         \"R_AUC_ROC\": None,\n        #         \"R_AUC_PR\": None,\n        #         \"VUS_ROC\": None,\n        #         \"VUS_PR\": None,\n        #         'f1_pa_10': results_f1_pa_k_10['best_f1_w_pa'],\n        #         'f1_pa_50': results_f1_pa_k_50['best_f1_w_pa'],\n        #         'f1_pa_90': results_f1_pa_k_90['best_f1_w_pa'],\n        #     }\n        if ucr_index == 79 or ucr_index == 108 or ucr_index == 187 
or ucr_index == 203:\n            pass\n        else:\n\n            if dataset == 'SMD' or dataset == 'NIPS_TS_Swan' or dataset == 'NIPS_TS_Water' or dataset == 'SWAT':\n                pass\n            else:\n                scores_simple = combine_all_evaluation_scores(pred, gt, test_energy)\n                for key, value in scores_simple.items():\n                    if key == 'Affiliation precision':\n                        eval_res[\"Affiliation precision\"] = value\n                    if key == 'Affiliation recall':\n                        eval_res[\"Affiliation recall\"] = value\n                    if key == 'R_AUC_ROC':\n                        eval_res[\"R_AUC_ROC\"] = value\n                    if key == 'R_AUC_PR':\n                        eval_res[\"R_AUC_PR\"] = value\n                    if key == 'VUS_ROC':\n                        eval_res[\"VUS_ROC\"] = value\n                    if key == 'VUS_PR':\n                        eval_res[\"VUS_PR\"] = value\n\n\n        # (4) detection adjustment\n        gt, pred = adjustment(gt, pred)\n\n        pred = np.array(pred)\n        gt = np.array(gt)\n        print(\"pred: \", pred.shape)\n        print(\"gt:   \", gt.shape)\n\n        accuracy = accuracy_score(gt, pred)\n        precision, recall, f_score, support = precision_recall_fscore_support(gt, pred, average='binary')\n        print(\"Accuracy : {:0.4f}, Precision : {:0.4f}, Recall : {:0.4f}, F-score : {:0.4f} \".format(\n            accuracy, precision,\n            recall, f_score))\n\n        eval_res['f1'] = f_score\n        eval_res['precision'] = precision\n        eval_res['recall'] = recall\n\n\n        f = open(\"result_anomaly_detection.txt\", 'a')\n        f.write(setting + \"  \\n\")\n        f.write(\"Accuracy : {:0.4f}, Precision : {:0.4f}, Recall : {:0.4f}, F-score : {:0.4f} \".format(\n            accuracy, precision,\n            recall, f_score))\n        f.write('\\n')\n        f.write('\\n')\n        f.close()\n\n\n        
return eval_res\n\n\n    def test_uni(self, setting, all_train_data, all_test_data, all_test_labels, all_test_timestamps, delay, config, test=0):\n        # test_data, test_loader = self._get_data(flag='test')\n        # train_data, train_loader = self._get_data(flag='train')\n\n\n        if test:\n            print('loading model')\n            self.model.load_state_dict(torch.load(os.path.join('./checkpoints/' + setting, 'checkpoint.pth')))\n\n        attens_energy = []\n        folder_path = './test_results/' + setting + '/'\n        if not os.path.exists(folder_path):\n            os.makedirs(folder_path)\n\n        self.model.eval()\n        self.anomaly_criterion = nn.MSELoss(reduce=False)\n\n        # (1) stastic on the train set\n\n        with torch.no_grad():\n            # for i, (batch_x, batch_y) in enumerate(train_loader):\n            #     batch_x = batch_x.float().to(self.device)\n            #     # reconstruction\n            #     outputs = self.model(batch_x, None, None, None)\n            #     # criterion\n            #     score = torch.mean(self.anomaly_criterion(batch_x, outputs), dim=-1)\n            #     score = score.detach().cpu().numpy()\n            #     attens_energy.append(score)\n\n            for k in all_train_data:\n                train_data = all_train_data[k]\n\n                train_data = np.array(train_data)\n\n                # train_data =\n                train_data = np.expand_dims(train_data, axis=-1)\n                train_dataset = UniLoader_train(train_data, config.win_size, 1)\n\n                train_loader = DataLoader(dataset=train_dataset,\n                                          batch_size=config.batch_size,\n                                          shuffle=True,\n                                          num_workers=2,\n                                          drop_last=True)\n\n                for i, input_data in enumerate(train_loader):\n                    # print(\"type(input) = \", 
type(input_data), input_data.shape)\n                    batch_x = input_data.float().to(self.device)\n                    # reconstruction\n                    outputs = self.model(batch_x, None, None, None)\n                    # criterion\n                    score = torch.mean(self.anomaly_criterion(batch_x, outputs), dim=-1)\n                    score = score.detach().cpu().numpy()\n                    attens_energy.append(score)\n\n        attens_energy = np.concatenate(attens_energy, axis=0).reshape(-1)\n        train_energy = np.array(attens_energy)\n\n        # (2) find the threshold\n        attens_energy = []\n        test_labels = []\n        with torch.no_grad():\n\n            for k in all_train_data:\n                _test_labels = all_test_labels[k]\n                test_data = all_test_data[k]\n\n                test_data = np.array(test_data)\n\n                test_data = np.expand_dims(test_data, axis=-1)\n\n                test_dataset = UniLoader_test(test_data, _test_labels, config.win_size, 1)\n\n                test_loader = DataLoader(dataset=test_dataset,\n                                         batch_size=config.batch_size,\n                                         shuffle=True,\n                                         num_workers=2,\n                                         drop_last=True)\n\n                for i, (input_data, labels) in enumerate(test_loader):\n                    batch_x = input_data.float().to(self.device)\n\n                    outputs = self.model(batch_x, None, None, None)\n                    # criterion\n                    score = torch.mean(self.anomaly_criterion(batch_x, outputs), dim=-1)\n                    score = score.detach().cpu().numpy()\n                    attens_energy.append(score)\n                    test_labels.append(labels)\n\n        attens_energy = np.concatenate(attens_energy, axis=0).reshape(-1)\n        test_energy = np.array(attens_energy)\n        combined_energy = 
np.concatenate([train_energy, test_energy], axis=0)\n        threshold = np.percentile(combined_energy, 100 - self.args.anomaly_ratio)\n        print(\"Threshold :\", threshold)\n\n        # (3) evaluation on the test set\n        pred = (test_energy > threshold).astype(int)\n        test_labels = np.concatenate(test_labels, axis=0).reshape(-1)\n        test_labels = np.array(test_labels)\n        gt = test_labels.astype(int)\n\n        print(\"pred:   \", pred.shape)\n        print(\"gt:     \", gt.shape)\n\n        # if dataset == 'UCR':\n        eval_res = {\n            'f1': None,\n            'precision': None,\n            'recall': None,\n            \"Affiliation precision\": None,\n            \"Affiliation recall\": None,\n            \"R_AUC_ROC\": None,\n            \"R_AUC_PR\": None,\n            \"VUS_ROC\": None,\n            \"VUS_PR\": None,\n            'f1_pa_10': None,\n            'f1_pa_50': None,\n            'f1_pa_90': None,\n        }\n\n        # scores_simple = combine_all_evaluation_scores(pred, gt, test_energy)\n        # for key, value in scores_simple.items():\n        #     if key == 'Affiliation precision':\n        #         eval_res[\"Affiliation precision\"] = value\n        #     if key == 'Affiliation recall':\n        #         eval_res[\"Affiliation recall\"] = value\n        #     if key == 'R_AUC_ROC':\n        #         eval_res[\"R_AUC_ROC\"] = value\n        #     if key == 'R_AUC_PR':\n        #         eval_res[\"R_AUC_PR\"] = value\n        #     if key == 'VUS_ROC':\n        #         eval_res[\"VUS_ROC\"] = value\n        #     if key == 'VUS_PR':\n        #         eval_res[\"VUS_PR\"] = value\n\n\n        # (4) detection adjustment\n        gt, pred = adjustment(gt, pred)\n\n        pred = np.array(pred)\n        gt = np.array(gt)\n        print(\"pred: \", pred.shape)\n        print(\"gt:   \", gt.shape)\n\n        accuracy = accuracy_score(gt, pred)\n        precision, recall, f_score, support = 
precision_recall_fscore_support(gt, pred, average='binary')\n        print(\"Accuracy : {:0.4f}, Precision : {:0.4f}, Recall : {:0.4f}, F-score : {:0.4f} \".format(\n            accuracy, precision,\n            recall, f_score))\n\n        eval_res['f1'] = f_score\n        eval_res['precision'] = precision\n        eval_res['recall'] = recall\n\n\n        f = open(\"result_anomaly_detection.txt\", 'a')\n        f.write(setting + \"  \\n\")\n        f.write(\"Accuracy : {:0.4f}, Precision : {:0.4f}, Recall : {:0.4f}, F-score : {:0.4f} \".format(\n            accuracy, precision,\n            recall, f_score))\n        f.write('\\n')\n        f.write('\\n')\n        f.close()\n\n\n        return eval_res\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/hello_test_evo.py",
    "content": "print(\"Hello World!!!\")"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/lstm_vae.py",
    "content": "import torch\nimport torch.nn.functional as F\nfrom torch.utils.data import TensorDataset, DataLoader\nimport numpy as np\nfrom models.lstm_vae_model import LSTM_VAE_Model\nfrom utils import split_with_nan, centerize_vary_length_series\nimport math\nimport time\nfrom tasks.anomaly_detection import eval_ad_result, np_shift\nimport bottleneck as bn\nfrom sklearn.metrics import f1_score, precision_score, recall_score\nfrom other_anomaly_baselines.metrics.affiliation.metrics import pr_from_events\nfrom other_anomaly_baselines.metrics.vus.metrics import get_range_vus_roc\nfrom other_anomaly_baselines.metrics.affiliation.generics import convert_vector_to_events\nfrom tadpak import evaluate\n\n\ndef adjustment(gt, pred):\n    anomaly_state = False\n    for i in range(len(gt)):\n        if gt[i] == 1 and pred[i] == 1 and not anomaly_state:\n            anomaly_state = True\n            for j in range(i, 0, -1):\n                if gt[j] == 0:\n                    break\n                else:\n                    if pred[j] == 0:\n                        pred[j] = 1\n            for j in range(i, len(gt)):\n                if gt[j] == 0:\n                    break\n                else:\n                    if pred[j] == 0:\n                        pred[j] = 1\n        elif gt[i] == 0:\n            anomaly_state = False\n        if anomaly_state:\n            pred[i] = 1\n    return gt, pred\n\n\nclass LSTM_VAE:\n    \n    def __init__(\n        self,\n        input_dims,\n        hidden_size=16,\n        hidden_dim=3,\n        device='cuda',\n        lr=0.001,\n        batch_size=8,\n        z_kld_weight=0.1,\n        x_kld_weight=0.1,\n        max_train_length=None,\n        after_iter_callback=None,\n        after_epoch_callback=None\n    ):\n        \n        super().__init__()\n        self.device = device\n        self.lr = lr\n        self.batch_size = batch_size\n        self.z_kld_weight = z_kld_weight\n        self.x_kld_weight = x_kld_weight\n     
   self.max_train_length = max_train_length\n        self.input_dims = input_dims\n       \n        self.net = LSTM_VAE_Model(device=self.device, in_channel=input_dims, hidden_size=hidden_size, hidden_dim=hidden_dim).to(self.device)\n        \n        self.after_iter_callback = after_iter_callback\n        self.after_epoch_callback = after_epoch_callback\n        \n        self.n_epochs = 0\n        self.n_iters = 0\n    \n    def train(self, train_data, n_epochs=None, n_iters=None, verbose=False):\n        ''' \n        Args:\n            train_data (numpy.ndarray): The training data. It should have a shape of (n_instance, n_timestamps, n_features). All missing data should be set to NaN.\n            n_epochs (Union[int, NoneType]): The number of epochs. When this reaches, the training stops.\n            n_iters (Union[int, NoneType]): The number of iterations. When this reaches, the training stops. If both n_epochs and n_iters are not specified, a default setting would be used that sets n_iters to 200 for a dataset with size <= 100000, 600 otherwise.\n            verbose (bool): Whether to print the training loss after each epoch.\n            \n        Returns:\n            loss_log: a list containing the training losses on each epoch.\n        '''\n        assert train_data.ndim == 3\n        \n        if n_iters is None and n_epochs is None:\n            n_iters = 200 if train_data.size <= 100000 else 600  # default param for n_iters\n        \n        if self.max_train_length is not None:\n            sections = train_data.shape[1] // self.max_train_length\n            if sections >= 2:\n                train_data = np.concatenate(split_with_nan(train_data, sections, axis=1), axis=0)\n                # train_data: (n_instance*sections, max_train_length, n_features)\n\n        temporal_missing = np.isnan(train_data).all(axis=-1).any(axis=0) # (max_train_length)\n        if temporal_missing[0] or temporal_missing[-1]: # whether the head or tail exists nan\n    
        train_data = centerize_vary_length_series(train_data)\n                \n        train_data = train_data[~np.isnan(train_data).all(axis=2).all(axis=1)] \n        # delete the sequence (max_train_length, n_features) contains only nan\n\n        for i in range(train_data.shape[0]):\n            train_data[i][np.isnan(train_data[i])] = np.nanmean(train_data[i])\n        \n        train_dataset = TensorDataset(torch.from_numpy(train_data).to(torch.float))\n        train_loader = DataLoader(train_dataset, batch_size=min(self.batch_size, len(train_dataset)), shuffle=True, drop_last=True)\n        \n        optimizer = torch.optim.AdamW(self.net.parameters(), lr=self.lr)\n        \n        loss_log = []\n        \n        while True:\n            if n_epochs is not None and self.n_epochs >= n_epochs:\n                break\n            \n            cum_loss = 0\n            n_epoch_iters = 0\n            \n            interrupted = False\n            for batch in train_loader:\n                if n_iters is not None and self.n_iters >= n_iters:\n                    interrupted = True\n                    break\n                \n                x = batch[0]  #(batch_size, n_timestamps, n_features)\n                # print(\"#####################\")\n                # raise Exception('my personal exception!')\n\n                if self.max_train_length is not None and x.size(1) > self.max_train_length:\n                    window_offset = np.random.randint(x.size(1) - self.max_train_length + 1)\n                    x = x[:, window_offset : window_offset + self.max_train_length]\n                x = x.to(self.device)\n                \n                optimizer.zero_grad()\n                \n                outputs, z_mu, z_log_var, x_mu, x_log_var = self.net(x) \n                loss = self.net.loss_function(x, outputs, z_mu, z_log_var, x_mu, x_log_var, self.z_kld_weight, self.x_kld_weight)\n                \n                loss.backward()\n                
optimizer.step()\n                    \n                cum_loss += loss.item()\n                n_epoch_iters += 1\n                \n                self.n_iters += 1\n                \n                if self.after_iter_callback is not None:\n                    self.after_iter_callback(self, loss.item())\n            \n            if interrupted:\n                break\n            \n            cum_loss /= n_epoch_iters\n            loss_log.append(cum_loss)\n            if verbose:\n                print(f\"Epoch #{self.n_epochs}: loss={cum_loss}\")\n            self.n_epochs += 1\n            \n            if self.after_epoch_callback is not None:\n                self.after_epoch_callback(self, cum_loss)\n            \n        return loss_log\n    \n\n    def anomaly_score(self, model, test_data, is_multi=False):\n        if is_multi:\n            test_data = torch.from_numpy(np.float32(test_data.reshape(1, -1, self.input_dims))).to(self.device)\n        else:\n            test_data = torch.from_numpy(np.float32(test_data.reshape(1, -1, 1))).to(self.device)\n\n        if self.max_train_length is not None and test_data.size(1) > self.max_train_length:\n            window_offset = np.random.randint(test_data.size(1) - self.max_train_length + 1)\n            test_data = test_data[:, window_offset: window_offset + self.max_train_length]\n\n        self.net.eval()\n        with torch.no_grad():\n            outputs, z_mu, z_log_var, x_mu, x_log_var = self.net(test_data)\n\n            # rec_error = torch.sum(torch.abs(outputs - test_data), dim=-1)\n            rec_error = torch.sum(torch.square(outputs - test_data), dim=-1)\n            rec_error = torch.flatten(rec_error)\n\n        return rec_error\n    \n    def evaluate(self, model, all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay, is_multi=False, ucr_index=None):\n        t = time.time()\n\n        res_log = []\n        labels_log = []\n      
  timestamps_log = []\n        res_log_socres = []\n        if is_multi:\n            train_data = all_train_data\n\n            test_data = all_test_data\n            test_labels = all_test_labels\n\n            train_err = self.anomaly_score(model, train_data, is_multi=is_multi).detach().cpu().numpy()\n            test_err = self.anomaly_score(model, test_data, is_multi=is_multi).detach().cpu().numpy()\n\n            ma = np_shift(bn.move_mean(np.concatenate([train_err, test_err]), 21), 1)\n            train_err_adj = (train_err - ma[:len(train_err)]) / ma[:len(train_err)]\n            test_err_adj = (test_err - ma[len(train_err):]) / ma[len(train_err):]\n            train_err_adj = train_err_adj[22:]\n\n            thr = np.mean(train_err_adj) + 4 * np.std(train_err_adj)\n            test_res = (test_err_adj > thr) * 1\n            res_log_socres.append(test_err_adj)\n\n            for i in range(len(test_res)):\n                if i >= delay and test_res[i - delay:i].sum() >= 1:\n                    test_res[i] = 0\n\n            res_log.append(test_res)\n            labels_log.append(test_labels)\n\n        else:\n            for k in all_test_data:\n                train_data = all_train_data[k]\n                train_labels = all_train_labels[k]\n                train_timestamps = all_train_timestamps[k]\n\n                test_data = all_test_data[k]\n                test_labels = all_test_labels[k]\n                test_timestamps = all_test_timestamps[k]\n\n                train_err = self.anomaly_score(model, train_data).detach().cpu().numpy()\n                test_err = self.anomaly_score(model, test_data).detach().cpu().numpy()\n\n                ma = np_shift(bn.move_mean(np.concatenate([train_err, test_err]), 21), 1)\n                train_err_adj = (train_err - ma[:len(train_err)]) / ma[:len(train_err)]\n                test_err_adj = (test_err - ma[len(train_err):]) / ma[len(train_err):]\n                train_err_adj = train_err_adj[22:]\n\n       
         thr = np.mean(train_err_adj) + 4 * np.std(train_err_adj)\n                test_res = (test_err_adj > thr) * 1\n                res_log_socres.append(test_err_adj)\n\n                for i in range(len(test_res)):\n                    if i >= delay and test_res[i-delay:i].sum() >= 1:\n                        test_res[i] = 0\n\n                res_log.append(test_res)\n                labels_log.append(test_labels)\n                timestamps_log.append(test_timestamps)\n        t = time.time() - t\n\n        if is_multi:\n            labels = np.asarray(labels_log, np.int64)[0]\n            pred = np.asarray(res_log, np.int64)[0]\n            # print(\"labels.shape = \", labels.shape, labels[:5])\n            # print(\"pred.shape = \", pred.shape, pred[:5])\n\n            if ucr_index == 79 or ucr_index == 108 or ucr_index == 187 or ucr_index == 203:\n\n                min_len = min(labels.shape[0], pred.shape[0])\n                labels = labels[:min_len]\n                pred = pred[:min_len]\n\n                labels, pred = adjustment(labels, pred)\n\n                eval_res = {\n                    'f1': f1_score(labels, pred),\n                    'precision': precision_score(labels, pred),\n                    'recall': recall_score(labels, pred),\n                    \"Affiliation precision\": None,\n                    \"Affiliation recall\": None,\n                    \"R_AUC_ROC\": None,\n                    \"R_AUC_PR\": None,\n                    \"VUS_ROC\": None,\n                    \"VUS_PR\": None,\n                    'f1_pa_10': None,\n                    'f1_pa_50': None,\n                    'f1_pa_90': None,\n                }\n            else:\n                events_pred = convert_vector_to_events(pred)\n                events_gt = convert_vector_to_events(labels)\n\n                Trange = (0, len(labels))\n                affiliation = pr_from_events(events_pred, events_gt, Trange)\n                vus_results = 
get_range_vus_roc(labels, pred, 100)  # default slidingWindow = 100\n\n                # pred_scores = np.asarray(res_log_socres, np.float64)[0]\n                # results_f1_pa_k_10 = evaluate.evaluate(pred_scores, labels, k=10)\n                # # results_f1_pa_k_30 = evaluate.evaluate(pred, labels, k=30)\n                # results_f1_pa_k_50 = evaluate.evaluate(pred_scores, labels, k=50)\n                # results_f1_pa_k_90 = evaluate.evaluate(pred_scores, labels, k=90)\n\n                labels, pred = adjustment(labels, pred)\n\n                eval_res = {\n                    'f1': f1_score(labels, pred),\n                    'precision': precision_score(labels, pred),\n                    'recall': recall_score(labels, pred),\n                    \"Affiliation precision\": affiliation['precision'],\n                    \"Affiliation recall\": affiliation['recall'],\n                    \"R_AUC_ROC\": vus_results[\"R_AUC_ROC\"],\n                    \"R_AUC_PR\": vus_results[\"R_AUC_PR\"],\n                    \"VUS_ROC\": vus_results[\"VUS_ROC\"],\n                    \"VUS_PR\": vus_results[\"VUS_PR\"],\n                    'f1_pa_10': None,\n                    'f1_pa_50': None,\n                    'f1_pa_90': None,\n                }\n        else:\n        \n            eval_res = eval_ad_result(res_log, labels_log, timestamps_log, delay, pred_scores=res_log_socres)\n        eval_res['infer_time'] = t\n        return res_log, eval_res\n\n    def save(self, fn):\n        ''' Save the model to a file.\n        \n        Args:\n            fn (str): filename.\n        '''\n        torch.save(self.net.state_dict(), fn)\n    \n    def load(self, fn):\n        ''' Load the model from a file.\n        \n        Args:\n            fn (str): filename.\n        '''\n        state_dict = torch.load(fn, map_location=self.device)\n        self.net.load_state_dict(state_dict)"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/metrics/AUC.py",
    "content": "# used by paper: TSB-UAD as the main evaluator\n# github: https://github.com/johnpaparrizos/TSB-UAD/blob/main/TSB_AD/utils/metrics.py\nimport numpy as np\nfrom sklearn import metrics\nfrom other_anomaly_baselines.metrics.evaluate_utils import find_length,range_convers_new\n\n\ndef extend_postive_range(x, window=16):\n    label = x.copy().astype(float)\n#     print(label)\n    L = range_convers_new(label)  # index of non-zero segments\n#     print(L)\n    length = len(label)\n    for k in range(len(L)):\n        s = L[k][0]\n        e = L[k][1]\n        # x1 is the extended list like [1,2,3] which are non-zero(from the end-e)\n        x1 = np.arange(e, min(e + window // 2, length))\n        label[x1] += np.sqrt(1 - (x1 - e) / (window))\n        # before the start-s\n        x2 = np.arange(max(s - window // 2, 0), s)\n        label[x2] += np.sqrt(1 - (s - x2) / (window))\n\n    label = np.minimum(np.ones(length), label)\n    return label\n\n\ndef extend_postive_range_individual(x, percentage=0.2):\n    label = x.copy().astype(float)\n    L = range_convers_new(label)  # index of non-zero segments\n    length = len(label)\n    for k in range(len(L)):\n        s = L[k][0]\n        e = L[k][1]\n\n        l0 = int((e - s + 1) * percentage)\n\n        x1 = np.arange(e, min(e + l0, length))\n        label[x1] += np.sqrt(1 - (x1 - e) / (2 * l0))\n\n        x2 = np.arange(max(s - l0, 0), s)\n        label[x2] += np.sqrt(1 - (s - x2) / (2 * l0))\n\n    label = np.minimum(np.ones(length), label)\n    return label\n\n\ndef TPR_FPR_RangeAUC(labels, pred, P, L):\n    product = labels * pred\n\n    TP = np.sum(product)\n\n    # recall = min(TP/P,1)\n    P_new = (P + np.sum(labels)) / 2  # so TPR is neither large nor small\n    # P_new = np.sum(labels)\n    recall = min(TP / P_new, 1)\n    # recall = TP/np.sum(labels)\n    # print('recall '+str(recall))\n\n    existence = 0\n    for seg in L:\n        if np.sum(product[seg[0]:(seg[1] + 1)]) > 0:\n            
existence += 1\n\n    existence_ratio = existence / len(L)\n    # print(existence_ratio)\n\n    # TPR_RangeAUC = np.sqrt(recall*existence_ratio)\n    # print(existence_ratio)\n    TPR_RangeAUC = recall * existence_ratio\n\n    FP = np.sum(pred) - TP\n    # TN = np.sum((1-pred) * (1-labels))\n\n    # FPR_RangeAUC = FP/(FP+TN)\n    N_new = len(labels) - P_new\n    FPR_RangeAUC = FP / N_new\n\n    Precision_RangeAUC = TP / np.sum(pred)\n\n    return TPR_RangeAUC, FPR_RangeAUC, Precision_RangeAUC\n\n\ndef Range_AUC(score_t_test, y_test,  window=5, percentage=0, plot_ROC=False, AUC_type='window'):\n    # AUC_type='window'/'percentage'\n    score = score_t_test\n    labels = y_test\n    score_sorted = -np.sort(-score)\n\n    P = np.sum(labels)\n    # print(np.sum(labels))\n    if AUC_type == 'window':\n        labels = extend_postive_range(labels, window=window)\n    else:\n        labels = extend_postive_range_individual(labels, percentage=percentage)\n\n    # print(np.sum(labels))\n    L = range_convers_new(labels)\n    TPR_list = [0]\n    FPR_list = [0]\n    Precision_list = [1]\n\n    for i in np.linspace(0, len(score) - 1, 250).astype(int):\n        threshold = score_sorted[i]\n        # print('thre='+str(threshold))\n        pred = score >= threshold\n        TPR, FPR, Precision = TPR_FPR_RangeAUC(labels, pred, P, L)\n\n        TPR_list.append(TPR)\n        FPR_list.append(FPR)\n        Precision_list.append(Precision)\n\n    TPR_list.append(1)\n    FPR_list.append(1)  # otherwise, range-AUC will stop earlier than (1,1)\n\n    tpr = np.array(TPR_list)\n    fpr = np.array(FPR_list)\n    prec = np.array(Precision_list)\n\n    width = fpr[1:] - fpr[:-1]\n    height = (tpr[1:] + tpr[:-1]) / 2\n    AUC_range = np.sum(width * height)\n\n    width_PR = tpr[1:-1] - tpr[:-2]\n    height_PR = (prec[1:] + prec[:-1]) / 2\n    AP_range = np.sum(width_PR * height_PR)\n\n    if plot_ROC:\n        return AUC_range, AP_range, fpr, tpr, prec\n\n    return AUC_range\n\n\ndef 
point_wise_AUC(score_t_test, y_test,  plot_ROC=False):\n    # area under curve\n    label = y_test\n    score = score_t_test\n    auc = metrics.roc_auc_score(label, score)\n    # plor ROC curve\n    if plot_ROC:\n        fpr, tpr, thresholds = metrics.roc_curve(label, score)\n        # display = metrics.RocCurveDisplay(fpr=fpr, tpr=tpr, roc_auc=auc)\n        # display.plot()\n        return auc, fpr, tpr\n    else:\n        return auc\n\n\ndef main():\n    y_test = np.zeros(100)\n    y_test[10:20] = 1\n    y_test[50:60] = 1\n    pred_labels = np.zeros(100)\n    pred_labels[15:17] = 0.5\n    pred_labels[55:62] = 0.7\n    # pred_labels[51:55] = 1\n    # true_events = get_events(y_test)\n    point_auc = point_wise_AUC(pred_labels, y_test)\n    range_auc = Range_AUC(pred_labels, y_test)\n    print(\"point_auc: {}, range_auc: {}\".format(point_auc, range_auc))\n\n\nif __name__ == \"__main__\":\n    main()"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/metrics/Matthews_correlation_coefficient.py",
    "content": "from sklearn.metrics import confusion_matrix\nimport numpy as np\n\n\ndef MCC(y_test, pred_labels):\n    tn, fp, fn, tp = confusion_matrix(y_test, pred_labels).ravel()\n    MCC_score = (tp*tn-fp*fn)/(((tp+fp)*(tp+fn)*(tn+fp)*(tn+fn))**0.5)\n\n    return MCC_score\n\n\ndef main():\n    y_test = np.zeros(100)\n    y_test[10:20] = 1\n    y_test[50:60] = 1\n    pred_labels = np.zeros(100)\n    pred_labels[15:17] = 1\n    pred_labels[55:62] = 1\n    # pred_labels[51:55] = 1\n    # true_events = get_events(y_test)\n    confusion_matric = MCC(y_test, pred_labels)\n#     print(confusion_matric)\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/metrics/affiliation/_affiliation_zone.py",
    "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom other_anomaly_baselines.metrics.affiliation._integral_interval import interval_intersection\n\ndef t_start(j, Js = [(1,2),(3,4),(5,6)], Trange = (1,10)):\n    \"\"\"\n    Helper for `E_gt_func`\n    \n    :param j: index from 0 to len(Js) (included) on which to get the start\n    :param Js: ground truth events, as a list of couples\n    :param Trange: range of the series where Js is included\n    :return: generalized start such that the middle of t_start and t_stop \n    always gives the affiliation zone\n    \"\"\"\n    b = max(Trange)\n    n = len(Js)\n    if j == n:\n        return(2*b - t_stop(n-1, Js, Trange))\n    else:\n        return(Js[j][0])\n\ndef t_stop(j, Js = [(1,2),(3,4),(5,6)], Trange = (1,10)):\n    \"\"\"\n    Helper for `E_gt_func`\n    \n    :param j: index from 0 to len(Js) (included) on which to get the stop\n    :param Js: ground truth events, as a list of couples\n    :param Trange: range of the series where Js is included\n    :return: generalized stop such that the middle of t_start and t_stop \n    always gives the affiliation zone\n    \"\"\"\n    if j == -1:\n        a = min(Trange)\n        return(2*a - t_start(0, Js, Trange))\n    else:\n        return(Js[j][1])\n\ndef E_gt_func(j, Js, Trange):\n    \"\"\"\n    Get the affiliation zone of element j of the ground truth\n    \n    :param j: index from 0 to len(Js) (excluded) on which to get the zone\n    :param Js: ground truth events, as a list of couples\n    :param Trange: range of the series where Js is included, can \n    be (-math.inf, math.inf) for distance measures\n    :return: affiliation zone of element j of the ground truth represented\n    as a couple\n    \"\"\"\n    range_left = (t_stop(j-1, Js, Trange) + t_start(j, Js, Trange))/2\n    range_right = (t_stop(j, Js, Trange) + t_start(j+1, Js, Trange))/2\n    return((range_left, range_right))\n\ndef get_all_E_gt_func(Js, Trange):\n    \"\"\"\n    Get the 
affiliation partition from the ground truth point of view\n    \n    :param Js: ground truth events, as a list of couples\n    :param Trange: range of the series where Js is included, can \n    be (-math.inf, math.inf) for distance measures\n    :return: affiliation partition of the events\n    \"\"\"\n    # E_gt is the limit of affiliation/attraction for each ground truth event\n    E_gt = [E_gt_func(j, Js, Trange) for j in range(len(Js))]\n    return(E_gt)\n\ndef affiliation_partition(Is = [(1,1.5),(2,5),(5,6),(8,9)], E_gt = [(1,2.5),(2.5,4.5),(4.5,10)]):\n    \"\"\"\n    Cut the events into the affiliation zones\n    The presentation given here is from the ground truth point of view,\n    but it is also used in the reversed direction in the main function.\n    \n    :param Is: events as a list of couples\n    :param E_gt: range of the affiliation zones\n    :return: a list of list of intervals (each interval represented by either \n    a couple or None for empty interval). The outer list is indexed by each\n    affiliation zone of `E_gt`. The inner list is indexed by the events of `Is`.\n    \"\"\"\n    out = [None] * len(E_gt)\n    for j in range(len(E_gt)):\n        E_gt_j = E_gt[j]\n        discarded_idx_before = [I[1] < E_gt_j[0] for I in Is]  # end point of predicted I is before the begin of E\n        discarded_idx_after = [I[0] > E_gt_j[1] for I in Is] # start of predicted I is after the end of E\n        kept_index = [not(a or b) for a, b in zip(discarded_idx_before, discarded_idx_after)]\n        Is_j = [x for x, y in zip(Is, kept_index)]\n        out[j] = [interval_intersection(I, E_gt[j]) for I in Is_j]\n    return(out)\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/metrics/affiliation/_integral_interval.py",
    "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport math\nfrom other_anomaly_baselines.metrics.affiliation.generics import _sum_wo_nan\n\"\"\"\nIn order to shorten the length of the variables,\nthe general convention in this file is to let:\n    - I for a predicted event (start, stop),\n    - Is for a list of predicted events,\n    - J for a ground truth event,\n    - Js for a list of ground truth events.\n\"\"\"\n\ndef interval_length(J = (1,2)):\n    \"\"\"\n    Length of an interval\n    \n    :param J: couple representating the start and stop of an interval, or None\n    :return: length of the interval, and 0 for a None interval\n    \"\"\"\n    if J is None:\n        return(0)\n    return(J[1] - J[0])\n\ndef sum_interval_lengths(Is = [(1,2),(3,4),(5,6)]):\n    \"\"\"\n    Sum of length of the intervals\n    \n    :param Is: list of intervals represented by starts and stops\n    :return: sum of the interval length\n    \"\"\"\n    return(sum([interval_length(I) for I in Is]))\n\ndef interval_intersection(I = (1, 3), J = (2, 4)): \n    \"\"\"\n    Intersection between two intervals I and J\n    I and J should be either empty or represent a positive interval (no point)\n    \n    :param I: an interval represented by start and stop\n    :param J: a second interval of the same form\n    :return: an interval representing the start and stop of the intersection (or None if empty)\n    \"\"\"\n    if I is None:\n        return(None)\n    if J is None:\n        return(None)\n        \n    I_inter_J = (max(I[0], J[0]), min(I[1], J[1]))\n    if I_inter_J[0] >= I_inter_J[1]:\n        return(None)\n    else:\n        return(I_inter_J)\n\ndef interval_subset(I = (1, 3), J = (0, 6)):\n    \"\"\"\n    Checks whether I is a subset of J\n    \n    :param I: an non empty interval represented by start and stop\n    :param J: a second non empty interval of the same form\n    :return: True if I is a subset of J\n    \"\"\"\n    if (I[0] >= J[0]) and (I[1] <= 
J[1]):\n        return True\n    else:\n        return False\n\ndef cut_into_three_func(I, J):\n    \"\"\"\n    Cut an interval I into a partition of 3 subsets:\n        the elements before J,\n        the elements belonging to J,\n        and the elements after J\n    \n    :param I: an interval represented by start and stop, or None for an empty one\n    :param J: a non empty interval\n    :return: a triplet of three intervals, each represented by either (start, stop) or None\n    \"\"\"\n    if I is None:\n        return((None, None, None))\n    \n    I_inter_J = interval_intersection(I, J)\n    if I == I_inter_J:\n        I_before = None\n        I_after = None\n    elif I[1] <= J[0]:\n        I_before = I\n        I_after = None\n    elif I[0] >= J[1]:\n        I_before = None\n        I_after = I\n    elif (I[0] <= J[0]) and (I[1] >= J[1]):\n        I_before = (I[0], I_inter_J[0])\n        I_after = (I_inter_J[1], I[1])\n    elif I[0] <= J[0]:\n        I_before = (I[0], I_inter_J[0])\n        I_after = None\n    elif I[1] >= J[1]:\n        I_before = None\n        I_after = (I_inter_J[1], I[1])\n    else:\n        raise ValueError('unexpected unconsidered case')\n    return(I_before, I_inter_J, I_after)\n  \ndef get_pivot_j(I, J):\n    \"\"\"\n    Get the single point of J that is the closest to I, called 'pivot' here,\n    with the requirement that I should be outside J\n    \n    :param I: a non empty interval (start, stop)\n    :param J: another non empty interval, with empty intersection with I\n    :return: the element j of J that is the closest to I\n    \"\"\"\n    if interval_intersection(I, J) is not None:\n        raise ValueError('I and J should have a void intersection')\n\n    j_pivot = None # j_pivot is a border of J\n    if max(I) <= min(J):\n        j_pivot = min(J)\n    elif min(I) >= max(J):\n        j_pivot = max(J)\n    else:\n        raise ValueError('I should be outside J')\n    return(j_pivot)\n\ndef integral_mini_interval(I, J):\n    
\"\"\"\n    In the specific case where interval I is located outside J,\n    integral of distance from x to J over the interval x \\in I.\n    This is the *integral* i.e. the sum.\n    It's not the mean (not divided by the length of I yet)\n    \n    :param I: a interval (start, stop), or None\n    :param J: a non empty interval, with empty intersection with I\n    :return: the integral of distances d(x, J) over x \\in I\n    \"\"\"\n    if I is None:\n        return(0)\n\n    j_pivot = get_pivot_j(I, J)\n    a = min(I)\n    b = max(I)\n    return((b-a)*abs((j_pivot - (a+b)/2)))\n\ndef integral_interval_distance(I, J):\n    \"\"\"\n    For any non empty intervals I, J, compute the\n    integral of distance from x to J over the interval x \\in I.\n    This is the *integral* i.e. the sum. \n    It's not the mean (not divided by the length of I yet)\n    The interval I can intersect J or not\n    \n    :param I: a interval (start, stop), or None\n    :param J: a non empty interval\n    :return: the integral of distances d(x, J) over x \\in I\n    \"\"\"\n    # I and J are single intervals (not generic sets)\n    # I is a predicted interval in the range of affiliation of J\n    \n    def f(I_cut):\n        return(integral_mini_interval(I_cut, J))\n    # If I_middle is fully included into J, it is\n    # the distance to J is always 0\n    def f0(I_middle):\n        return(0)\n\n    cut_into_three = cut_into_three_func(I, J)\n    # Distance for now, not the mean:\n    # Distance left: Between cut_into_three[0] and the point min(J)\n    d_left = f(cut_into_three[0])\n    # Distance middle: Between cut_into_three[1] = I inter J, and J\n    d_middle = f0(cut_into_three[1])\n    # Distance right: Between cut_into_three[2] and the point max(J)\n    d_right = f(cut_into_three[2])\n    # It's an integral so summable\n    return(d_left + d_middle + d_right)\n\ndef integral_mini_interval_P_CDFmethod__min_piece(I, J, E):\n    \"\"\"\n    Helper of 
`integral_mini_interval_Pprecision_CDFmethod`\n    In the specific case where interval I is located outside J,\n    compute the integral $\\int_{d_min}^{d_max} \\min(m, x) dx$, with:\n    - m the smallest distance from J to E,\n    - d_min the smallest distance d(x, J) from x \\in I to J\n    - d_max the largest distance d(x, J) from x \\in I to J\n    \n    :param I: a single predicted interval, a non empty interval (start, stop)\n    :param J: ground truth interval, a non empty interval, with empty intersection with I\n    :param E: the affiliation/influence zone for J, represented as a couple (start, stop)\n    :return: the integral $\\int_{d_min}^{d_max} \\min(m, x) dx$\n    \"\"\"\n    if interval_intersection(I, J) is not None:\n        raise ValueError('I and J should have a void intersection')\n    if not interval_subset(J, E):\n        raise ValueError('J should be included in E')\n    if not interval_subset(I, E):\n        raise ValueError('I should be included in E')\n\n    e_min = min(E)\n    j_min = min(J)\n    j_max = max(J)\n    e_max = max(E)\n    i_min = min(I)\n    i_max = max(I)\n  \n    d_min = max(i_min - j_max, j_min - i_max)\n    d_max = max(i_max - j_max, j_min - i_min)\n    m = min(j_min - e_min, e_max - j_max)\n    A = min(d_max, m)**2 - min(d_min, m)**2\n    B = max(d_max, m) - max(d_min, m)\n    C = (1/2)*A + m*B\n    return(C)\n\ndef integral_mini_interval_Pprecision_CDFmethod(I, J, E):\n    \"\"\"\n    Integral of the probability of distances over the interval I.\n    In the specific case where interval I is located outside J,\n    compute the integral $\\int_{x \\in I} Fbar(dist(x,J)) dx$.\n    This is the *integral* i.e. 
the sum (not the mean)\n    \n    :param I: a single predicted interval, a non empty interval (start, stop)\n    :param J: ground truth interval, a non empty interval, with empty intersection with I\n    :param E: the affiliation/influence zone for J, represented as a couple (start, stop)\n    :return: the integral $\\int_{x \\in I} Fbar(dist(x,J)) dx$\n    \"\"\"\n    integral_min_piece = integral_mini_interval_P_CDFmethod__min_piece(I, J, E)\n  \n    e_min = min(E)\n    j_min = min(J)\n    j_max = max(J)\n    e_max = max(E)\n    i_min = min(I)\n    i_max = max(I)\n    d_min = max(i_min - j_max, j_min - i_max)\n    d_max = max(i_max - j_max, j_min - i_min)\n    integral_linear_piece = (1/2)*(d_max**2 - d_min**2)\n    integral_remaining_piece = (j_max - j_min)*(i_max - i_min)\n    \n    DeltaI = i_max - i_min\n    DeltaE = e_max - e_min\n    \n    output = DeltaI - (1/DeltaE)*(integral_min_piece + integral_linear_piece + integral_remaining_piece)\n    return(output)\n\ndef integral_interval_probaCDF_precision(I, J, E):\n    \"\"\"\n    Integral of the probability of distances over the interval I.\n    Compute the integral $\\int_{x \\in I} Fbar(dist(x,J)) dx$.\n    This is the *integral* i.e. 
the sum (not the mean)\n    \n    :param I: a single (non empty) predicted interval in the zone of affiliation of J\n    :param J: ground truth interval\n    :param E: affiliation/influence zone for J\n    :return: the integral $\\int_{x \\in I} Fbar(dist(x,J)) dx$\n    \"\"\"\n    # I and J are single intervals (not generic sets)\n    def f(I_cut):\n        if I_cut is None:\n            return(0)\n        else:\n            return(integral_mini_interval_Pprecision_CDFmethod(I_cut, J, E))\n            \n    # If I_middle is fully included into J, it is\n    # integral of 1 on the interval I_middle, so it's |I_middle|\n    def f0(I_middle):\n        if I_middle is None:\n            return(0)\n        else:\n            return(max(I_middle) - min(I_middle))\n    \n    cut_into_three = cut_into_three_func(I, J)\n    # Distance for now, not the mean:\n    # Distance left: Between cut_into_three[0] and the point min(J)\n    d_left = f(cut_into_three[0])\n    # Distance middle: Between cut_into_three[1] = I inter J, and J\n    d_middle = f0(cut_into_three[1])\n    # Distance right: Between cut_into_three[2] and the point max(J)\n    d_right = f(cut_into_three[2])\n    # It's an integral so summable\n    return(d_left + d_middle + d_right)\n\ndef cut_J_based_on_mean_func(J, e_mean):\n    \"\"\"\n    Helper function for the recall.\n    Partition J into two intervals: before and after e_mean\n    (e_mean represents the center element of E the zone of affiliation)\n    \n    :param J: ground truth interval\n    :param e_mean: a float number (center value of E)\n    :return: a couple partitionning J into (J_before, J_after)\n    \"\"\"\n    if J is None:\n        J_before = None\n        J_after = None\n    elif e_mean >= max(J):\n        J_before = J\n        J_after = None\n    elif e_mean <= min(J):\n        J_before = None\n        J_after = J\n    else: # e_mean is across J\n        J_before = (min(J), e_mean)\n        J_after = (e_mean, max(J))\n        \n    
return((J_before, J_after))\n\ndef integral_mini_interval_Precall_CDFmethod(I, J, E):\n    \"\"\"\n    Integral of the probability of distances over the interval J.\n    In the specific case where interval J is located outside I,\n    compute the integral $\\int_{y \\in J} Fbar_y(dist(y,I)) dy$.\n    This is the *integral* i.e. the sum (not the mean)\n    \n    :param I: a single (non empty) predicted interval\n    :param J: ground truth (non empty) interval, with empty intersection with I\n    :param E: the affiliation/influence zone for J, represented as a couple (start, stop)\n    :return: the integral $\\int_{y \\in J} Fbar_y(dist(y,I)) dy$\n    \"\"\"\n    # The interval J should be located outside I \n    # (so it's either the left piece or the right piece w.r.t I)\n    i_pivot = get_pivot_j(J, I)\n    e_min = min(E)\n    e_max = max(E)\n    e_mean = (e_min + e_max) / 2\n    \n    # If i_pivot is outside E (it's possible), then\n    # the distance is worst that any random element within E,\n    # so we set the recall to 0\n    if i_pivot <= min(E):\n        return(0)\n    elif i_pivot >= max(E):\n        return(0)\n    # Otherwise, we have at least i_pivot in E and so d < M so min(d,M)=d\n    \n    cut_J_based_on_e_mean = cut_J_based_on_mean_func(J, e_mean)\n    J_before = cut_J_based_on_e_mean[0]\n    J_after = cut_J_based_on_e_mean[1]\n  \n    iemin_mean = (e_min + i_pivot)/2\n    cut_Jbefore_based_on_iemin_mean = cut_J_based_on_mean_func(J_before, iemin_mean)\n    J_before_closeE = cut_Jbefore_based_on_iemin_mean[0] # before e_mean and closer to e_min than i_pivot ~ J_before_before\n    J_before_closeI = cut_Jbefore_based_on_iemin_mean[1] # before e_mean and closer to i_pivot than e_min ~ J_before_after\n    \n    iemax_mean = (e_max + i_pivot)/2\n    cut_Jafter_based_on_iemax_mean = cut_J_based_on_mean_func(J_after, iemax_mean)\n    J_after_closeI = cut_Jafter_based_on_iemax_mean[0] # after e_mean and closer to i_pivot than e_max ~ J_after_before\n    
J_after_closeE = cut_Jafter_based_on_iemax_mean[1] # after e_mean and closer to e_max than i_pivot ~ J_after_after\n    \n    if J_before_closeE is not None:\n        j_before_before_min = min(J_before_closeE) # == min(J)\n        j_before_before_max = max(J_before_closeE)\n    else:\n        j_before_before_min = math.nan\n        j_before_before_max = math.nan\n  \n    if J_before_closeI is not None:\n        j_before_after_min = min(J_before_closeI) # == j_before_before_max if existing\n        j_before_after_max = max(J_before_closeI) # == max(J_before)\n    else:\n        j_before_after_min = math.nan\n        j_before_after_max = math.nan\n   \n    if J_after_closeI is not None:\n        j_after_before_min = min(J_after_closeI) # == min(J_after)\n        j_after_before_max = max(J_after_closeI) \n    else:\n        j_after_before_min = math.nan\n        j_after_before_max = math.nan\n    \n    if J_after_closeE is not None:\n        j_after_after_min = min(J_after_closeE) # == j_after_before_max if existing\n        j_after_after_max = max(J_after_closeE) # == max(J)\n    else:\n        j_after_after_min = math.nan\n        j_after_after_max = math.nan\n  \n    # <-- J_before_closeE --> <-- J_before_closeI --> <-- J_after_closeI --> <-- J_after_closeE -->\n    # j_bb_min       j_bb_max j_ba_min       j_ba_max j_ab_min      j_ab_max j_aa_min      j_aa_max\n    # (with `b` for before and `a` for after in the previous variable names)\n    \n    #                                          vs e_mean  m = min(t-e_min, e_max-t)  d=|i_pivot-t|   min(d,m)                            \\int min(d,m)dt   \\int d dt        \\int_(min(d,m)+d)dt                                    \\int_{t \\in J}(min(d,m)+d)dt\n    # Case J_before_closeE & i_pivot after J   before     t-e_min                    i_pivot-t       min(i_pivot-t,t-e_min) = t-e_min    t^2/2-e_min*t     i_pivot*t-t^2/2  t^2/2-e_min*t+i_pivot*t-t^2/2 = (i_pivot-e_min)*t      (i_pivot-e_min)*tB - (i_pivot-e_min)*tA = 
(i_pivot-e_min)*(tB-tA)\n    # Case J_before_closeI & i_pivot after J   before     t-e_min                    i_pivot-t       min(i_pivot-t,t-e_min) = i_pivot-t  i_pivot*t-t^2/2   i_pivot*t-t^2/2  i_pivot*t-t^2/2+i_pivot*t-t^2/2 = 2*i_pivot*t-t^2      2*i_pivot*tB-tB^2 - 2*i_pivot*tA + tA^2 = 2*i_pivot*(tB-tA) - (tB^2 - tA^2)\n    # Case J_after_closeI & i_pivot after J    after      e_max-t                    i_pivot-t       min(i_pivot-t,e_max-t) = i_pivot-t  i_pivot*t-t^2/2   i_pivot*t-t^2/2  i_pivot*t-t^2/2+i_pivot*t-t^2/2 = 2*i_pivot*t-t^2      2*i_pivot*tB-tB^2 - 2*i_pivot*tA + tA^2 = 2*i_pivot*(tB-tA) - (tB^2 - tA^2)\n    # Case J_after_closeE & i_pivot after J    after      e_max-t                    i_pivot-t       min(i_pivot-t,e_max-t) = e_max-t    e_max*t-t^2/2     i_pivot*t-t^2/2  e_max*t-t^2/2+i_pivot*t-t^2/2 = (e_max+i_pivot)*t-t^2  (e_max+i_pivot)*tB-tB^2 - (e_max+i_pivot)*tA + tA^2 = (e_max+i_pivot)*(tB-tA) - (tB^2 - tA^2)\n    #\n    # Case J_before_closeE & i_pivot before J  before     t-e_min                    t-i_pivot       min(t-i_pivot,t-e_min) = t-e_min    t^2/2-e_min*t     t^2/2-i_pivot*t  t^2/2-e_min*t+t^2/2-i_pivot*t = t^2-(e_min+i_pivot)*t  tB^2-(e_min+i_pivot)*tB - tA^2 + (e_min+i_pivot)*tA = (tB^2 - tA^2) - (e_min+i_pivot)*(tB-tA)\n    # Case J_before_closeI & i_pivot before J  before     t-e_min                    t-i_pivot       min(t-i_pivot,t-e_min) = t-i_pivot  t^2/2-i_pivot*t   t^2/2-i_pivot*t  t^2/2-i_pivot*t+t^2/2-i_pivot*t = t^2-2*i_pivot*t      tB^2-2*i_pivot*tB - tA^2 + 2*i_pivot*tA = (tB^2 - tA^2) - 2*i_pivot*(tB-tA)\n    # Case J_after_closeI & i_pivot before J   after      e_max-t                    t-i_pivot       min(t-i_pivot,e_max-t) = t-i_pivot  t^2/2-i_pivot*t   t^2/2-i_pivot*t  t^2/2-i_pivot*t+t^2/2-i_pivot*t = t^2-2*i_pivot*t      tB^2-2*i_pivot*tB - tA^2 + 2*i_pivot*tA = (tB^2 - tA^2) - 2*i_pivot*(tB-tA)\n    # Case J_after_closeE & i_pivot before J   after      e_max-t                    t-i_pivot       
min(t-i_pivot,e_max-t) = e_max-t    e_max*t-t^2/2     t^2/2-i_pivot*t  e_max*t-t^2/2+t^2/2-i_pivot*t = (e_max-i_pivot)*t      (e_max-i_pivot)*tB - (e_max-i_pivot)*tA = (e_max-i_pivot)*(tB-tA)\n    \n    if i_pivot >= max(J):\n        part1_before_closeE = (i_pivot-e_min)*(j_before_before_max - j_before_before_min) # (i_pivot-e_min)*(tB-tA) # j_before_before_max - j_before_before_min\n        part2_before_closeI = 2*i_pivot*(j_before_after_max-j_before_after_min) - (j_before_after_max**2 - j_before_after_min**2) # 2*i_pivot*(tB-tA) - (tB^2 - tA^2) # j_before_after_max - j_before_after_min\n        part3_after_closeI = 2*i_pivot*(j_after_before_max-j_after_before_min) - (j_after_before_max**2 - j_after_before_min**2) # 2*i_pivot*(tB-tA) - (tB^2 - tA^2) # j_after_before_max - j_after_before_min  \n        part4_after_closeE = (e_max+i_pivot)*(j_after_after_max-j_after_after_min) - (j_after_after_max**2 - j_after_after_min**2) # (e_max+i_pivot)*(tB-tA) - (tB^2 - tA^2) # j_after_after_max - j_after_after_min\n        out_parts = [part1_before_closeE, part2_before_closeI, part3_after_closeI, part4_after_closeE]\n    elif i_pivot <= min(J):\n        part1_before_closeE = (j_before_before_max**2 - j_before_before_min**2) - (e_min+i_pivot)*(j_before_before_max-j_before_before_min) # (tB^2 - tA^2) - (e_min+i_pivot)*(tB-tA) # j_before_before_max - j_before_before_min\n        part2_before_closeI = (j_before_after_max**2 - j_before_after_min**2) - 2*i_pivot*(j_before_after_max-j_before_after_min) # (tB^2 - tA^2) - 2*i_pivot*(tB-tA) # j_before_after_max - j_before_after_min\n        part3_after_closeI = (j_after_before_max**2 - j_after_before_min**2) - 2*i_pivot*(j_after_before_max - j_after_before_min) # (tB^2 - tA^2) - 2*i_pivot*(tB-tA) # j_after_before_max - j_after_before_min\n        part4_after_closeE = (e_max-i_pivot)*(j_after_after_max - j_after_after_min) # (e_max-i_pivot)*(tB-tA) # j_after_after_max - j_after_after_min\n        out_parts = [part1_before_closeE, 
part2_before_closeI, part3_after_closeI, part4_after_closeE]\n    else:\n        raise ValueError('The i_pivot should be outside J')\n    \n    out_integral_min_dm_plus_d = _sum_wo_nan(out_parts) # integral on all J, i.e. sum of the disjoint parts\n\n    # We have for each point t of J:\n    # \\bar{F}_{t, recall}(d) = 1 - (1/|E|) * (min(d,m) + d)\n    # Since t is a single-point here, and we are in the case where i_pivot is inside E.\n    # The integral is then given by:\n    # C = \\int_{t \\in J} \\bar{F}_{t, recall}(D(t)) dt\n    #   = \\int_{t \\in J} 1 - (1/|E|) * (min(d,m) + d) dt\n    #   = |J| - (1/|E|) * [\\int_{t \\in J} (min(d,m) + d) dt]\n    #   = |J| - (1/|E|) * out_integral_min_dm_plus_d    \n    DeltaJ = max(J) - min(J)\n    DeltaE = max(E) - min(E)\n    C = DeltaJ - (1/DeltaE) * out_integral_min_dm_plus_d\n    \n    return(C)\n\ndef integral_interval_probaCDF_recall(I, J, E):\n    \"\"\"\n    Integral of the probability of distances over the interval J.\n    Compute the integral $\\int_{y \\in J} Fbar_y(dist(y,I)) dy$.\n    This is the *integral* i.e. 
the sum (not the mean)\n\n    :param I: a single (non empty) predicted interval\n    :param J: ground truth (non empty) interval\n    :param E: the affiliation/influence zone for J\n    :return: the integral $\\int_{y \\in J} Fbar_y(dist(y,I)) dy$\n    \"\"\"\n    # I and J are single intervals (not generic sets)\n    # E is the outside affiliation interval of J (even for recall!)\n    # (in particular J \\subset E)\n    #\n    # J is the portion of the ground truth affiliated to I\n    # I is a predicted interval (can be outside E possibly since it's recall)\n    def f(J_cut):\n        if J_cut is None:\n            return(0)\n        else:\n            return integral_mini_interval_Precall_CDFmethod(I, J_cut, E)\n\n    # If J_middle is fully included into I, it is\n    # integral of 1 on the interval J_middle, so it's |J_middle|\n    def f0(J_middle):\n        if J_middle is None:\n            return(0)\n        else:\n            return(max(J_middle) - min(J_middle))\n    \n    cut_into_three = cut_into_three_func(J, I) # it's J that we cut into 3, depending on the position w.r.t I\n    # since we integrate over J this time.\n    #\n    # Distance for now, not the mean:\n    # Distance left: Between cut_into_three[0] and the point min(I)\n    d_left = f(cut_into_three[0])\n    # Distance middle: Between cut_into_three[1] = J inter I, and I\n    d_middle = f0(cut_into_three[1])\n    # Distance right: Between cut_into_three[2] and the point max(I)\n    d_right = f(cut_into_three[2])\n    # It's an integral so summable\n    return(d_left + d_middle + d_right)\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/metrics/affiliation/_single_ground_truth_event.py",
    "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport math\nfrom other_anomaly_baselines.metrics.affiliation._affiliation_zone import (\n        get_all_E_gt_func, \n        affiliation_partition)\nfrom other_anomaly_baselines.metrics.affiliation._integral_interval import (\n        integral_interval_distance,\n        integral_interval_probaCDF_precision, \n        integral_interval_probaCDF_recall, \n        interval_length,\n        sum_interval_lengths)\n\ndef affiliation_precision_distance(Is = [(1,2),(3,4),(5,6)], J = (2,5.5)):\n    \"\"\"\n    Compute the individual average distance from Is to a single ground truth J\n    \n    :param Is: list of predicted events within the affiliation zone of J\n    :param J: couple representating the start and stop of a ground truth interval\n    :return: individual average precision directed distance number\n    \"\"\"\n    if all([I is None for I in Is]): # no prediction in the current area\n        return(math.nan) # undefined\n    return(sum([integral_interval_distance(I, J) for I in Is]) / sum_interval_lengths(Is))\n\ndef affiliation_precision_proba(Is = [(1,2),(3,4),(5,6)], J = (2,5.5), E = (0,8)):\n    \"\"\"\n    Compute the individual precision probability from Is to a single ground truth J\n    \n    :param Is: list of predicted events within the affiliation zone of J\n    :param J: couple representating the start and stop of a ground truth interval\n    :param E: couple representing the start and stop of the zone of affiliation of J\n    :return: individual precision probability in [0, 1], or math.nan if undefined\n    \"\"\"\n    if all([I is None for I in Is]): # no prediction in the current area\n        return(math.nan) # undefined\n    return(sum([integral_interval_probaCDF_precision(I, J, E) for I in Is]) / sum_interval_lengths(Is))\n\ndef affiliation_recall_distance(Is = [(1,2),(3,4),(5,6)], J = (2,5.5)):\n    \"\"\"\n    Compute the individual average distance from a single J to the 
predictions Is\n    \n    :param Is: list of predicted events within the affiliation zone of J\n    :param J: couple representating the start and stop of a ground truth interval\n    :return: individual average recall directed distance number\n    \"\"\"\n    Is = [I for I in Is if I is not None] # filter possible None in Is\n    if len(Is) == 0: # there is no prediction in the current area\n        return(math.inf)\n    E_gt_recall = get_all_E_gt_func(Is, (-math.inf, math.inf))  # here from the point of view of the predictions\n    Js = affiliation_partition([J], E_gt_recall) # partition of J depending of proximity with Is\n    return(sum([integral_interval_distance(J[0], I) for I, J in zip(Is, Js)]) / interval_length(J))\n\ndef affiliation_recall_proba(Is = [(1,2),(3,4),(5,6)], J = (2,5.5), E = (0,8)):\n    \"\"\"\n    Compute the individual recall probability from a single ground truth J to Is\n    \n    :param Is: list of predicted events within the affiliation zone of J\n    :param J: couple representating the start and stop of a ground truth interval\n    :param E: couple representing the start and stop of the zone of affiliation of J\n    :return: individual recall probability in [0, 1]\n    \"\"\"\n    Is = [I for I in Is if I is not None] # filter possible None in Is\n    if len(Is) == 0: # there is no prediction in the current area\n        return(0)\n    E_gt_recall = get_all_E_gt_func(Is, E) # here from the point of view of the predictions\n    Js = affiliation_partition([J], E_gt_recall) # partition of J depending of proximity with Is\n    return(sum([integral_interval_probaCDF_recall(I, J[0], E) for I, J in zip(Is, Js)]) / interval_length(J))\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/metrics/affiliation/generics.py",
    "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom itertools import groupby\nfrom operator import itemgetter\nimport math\nimport gzip\nimport glob\nimport os\n\ndef convert_vector_to_events(vector = [0, 1, 1, 0, 0, 1, 0]):\n    \"\"\"\n    Convert a binary vector (indicating 1 for the anomalous instances)\n    to a list of events. The events are considered as durations,\n    i.e. setting 1 at index i corresponds to an anomalous interval [i, i+1).\n    \n    :param vector: a list of elements belonging to {0, 1}\n    :return: a list of couples, each couple representing the start and stop of\n    each event\n    \"\"\"\n    positive_indexes = [idx for idx, val in enumerate(vector) if val > 0]\n    events = []\n    for k, g in groupby(enumerate(positive_indexes), lambda ix : ix[0] - ix[1]):\n        cur_cut = list(map(itemgetter(1), g))\n        events.append((cur_cut[0], cur_cut[-1]))\n    \n    # Consistent conversion in case of range anomalies (for indexes):\n    # A positive index i is considered as the interval [i, i+1),\n    # so the last index should be moved by 1\n    events = [(x, y+1) for (x,y) in events]\n    # print(\"events = \", events)\n        \n    return(events)\n\ndef infer_Trange(events_pred, events_gt):\n    \"\"\"\n    Given the list of events events_pred and events_gt, get the\n    smallest possible Trange corresponding to the start and stop indexes \n    of the whole series.\n    Trange will not influence the measure of distances, but will impact the\n    measures of probabilities.\n    \n    :param events_pred: a list of couples corresponding to predicted events\n    :param events_gt: a list of couples corresponding to ground truth events\n    :return: a couple corresponding to the smallest range containing the events\n    \"\"\"\n    if len(events_gt) == 0:\n        raise ValueError('The gt events should contain at least one event')\n    if len(events_pred) == 0:\n        # empty prediction, base Trange only on events_gt 
(which is non empty)\n        return(infer_Trange(events_gt, events_gt))\n        \n    min_pred = min([x[0] for x in events_pred])\n    min_gt = min([x[0] for x in events_gt])\n    max_pred = max([x[1] for x in events_pred])\n    max_gt = max([x[1] for x in events_gt])\n    Trange = (min(min_pred, min_gt), max(max_pred, max_gt))\n    return(Trange)\n\ndef has_point_anomalies(events):\n    \"\"\"\n    Checking whether events contain point anomalies, i.e.\n    events starting and stopping at the same time.\n    \n    :param events: a list of couples corresponding to predicted events\n    :return: True is the events have any point anomalies, False otherwise\n    \"\"\"\n    if len(events) == 0:\n        return(False)\n    return(min([x[1] - x[0] for x in events]) == 0)\n\ndef _sum_wo_nan(vec):\n    \"\"\"\n    Sum of elements, ignoring math.isnan ones\n    \n    :param vec: vector of floating numbers\n    :return: sum of the elements, ignoring math.isnan ones\n    \"\"\"\n    vec_wo_nan = [e for e in vec if not math.isnan(e)]\n    return(sum(vec_wo_nan))\n    \ndef _len_wo_nan(vec):\n    \"\"\"\n    Count of elements, ignoring math.isnan ones\n    \n    :param vec: vector of floating numbers\n    :return: count of the elements, ignoring math.isnan ones\n    \"\"\"\n    vec_wo_nan = [e for e in vec if not math.isnan(e)]\n    return(len(vec_wo_nan))\n\ndef read_gz_data(filename = 'data/machinetemp_groundtruth.gz'):\n    \"\"\"\n    Load a file compressed with gz, such that each line of the\n    file is either 0 (representing a normal instance) or 1 (representing)\n    an anomalous instance.\n    :param filename: file path to the gz compressed file\n    :return: list of integers with either 0 or 1\n    \"\"\"\n    with gzip.open(filename, 'rb') as f:\n        content = f.read().splitlines()\n    content = [int(x) for x in content]\n    return(content)\n\ndef read_all_as_events():\n    \"\"\"\n    Load the files contained in the folder `data/` and convert\n    to events. 
The length of the series is kept.\n    The convention for the file name is: `dataset_algorithm.gz`\n    :return: two dictionaries:\n        - the first containing the list of events for each dataset and algorithm,\n        - the second containing the range of the series for each dataset\n    \"\"\"\n    filepaths = glob.glob('data/*.gz')\n    datasets = dict()\n    Tranges = dict()\n    for filepath in filepaths:\n        vector = read_gz_data(filepath)\n        events = convert_vector_to_events(vector)\n        # ad hoc cut for those files\n        cut_filepath = (os.path.split(filepath)[1]).split('_')\n        data_name = cut_filepath[0]\n        algo_name = (cut_filepath[1]).split('.')[0]\n        if not data_name in datasets:\n            datasets[data_name] = dict()\n            Tranges[data_name] = (0, len(vector))\n        datasets[data_name][algo_name] = events\n    return(datasets, Tranges)\n\ndef f1_func(p, r):\n    \"\"\"\n    Compute the f1 function\n    :param p: precision numeric value\n    :param r: recall numeric value\n    :return: f1 numeric value\n    \"\"\"\n    return(2*p*r/(p+r))\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/metrics/affiliation/metrics.py",
    "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom other_anomaly_baselines.metrics.affiliation.generics import (\n        infer_Trange,\n        has_point_anomalies, \n        _len_wo_nan, \n        _sum_wo_nan,\n        read_all_as_events)\nfrom other_anomaly_baselines.metrics.affiliation._affiliation_zone import (\n        get_all_E_gt_func, \n        affiliation_partition)\nfrom other_anomaly_baselines.metrics.affiliation._single_ground_truth_event import (\n        affiliation_precision_distance,\n        affiliation_recall_distance,\n        affiliation_precision_proba,\n        affiliation_recall_proba)\n\ndef test_events(events):\n    \"\"\"\n    Verify the validity of the input events\n    :param events: list of events, each represented by a couple (start, stop)\n    :return: None. Raise an error for incorrect formed or non ordered events\n    \"\"\"\n    if type(events) is not list:\n        raise TypeError('Input `events` should be a list of couples')\n    if not all([type(x) is tuple for x in events]):\n        raise TypeError('Input `events` should be a list of tuples')\n    if not all([len(x) == 2 for x in events]):\n        raise ValueError('Input `events` should be a list of couples (start, stop)')\n    if not all([x[0] <= x[1] for x in events]):\n        raise ValueError('Input `events` should be a list of couples (start, stop) with start <= stop')\n    if not all([events[i][1] < events[i+1][0] for i in range(len(events) - 1)]):\n        raise ValueError('Couples of input `events` should be disjoint and ordered')\n\ndef pr_from_events(events_pred, events_gt, Trange):\n    \"\"\"\n    Compute the affiliation metrics including the precision/recall in [0,1],\n    along with the individual precision/recall distances and probabilities\n    \n    :param events_pred: list of predicted events, each represented by a couple\n    indicating the start and the stop of the event\n    :param events_gt: list of ground truth events, each 
represented by a couple\n    indicating the start and the stop of the event\n    :param Trange: range of the series where events_pred and events_gt are included,\n    represented as a couple (start, stop)\n    :return: dictionary with precision, recall, and the individual metrics\n    \"\"\"\n    # testing the inputs\n    test_events(events_pred)\n    test_events(events_gt)\n    \n    # other tests\n    minimal_Trange = infer_Trange(events_pred, events_gt)\n    if not Trange[0] <= minimal_Trange[0]:\n        raise ValueError('`Trange` should include all the events')\n    if not minimal_Trange[1] <= Trange[1]:\n        raise ValueError('`Trange` should include all the events')\n    \n    if len(events_gt) == 0:\n        raise ValueError('Input `events_gt` should have at least one event')\n\n    if has_point_anomalies(events_pred) or has_point_anomalies(events_gt):\n        raise ValueError('Cannot manage point anomalies currently')\n\n    if Trange is None:\n        # Set as default, but Trange should be indicated if probabilities are used\n        raise ValueError('Trange should be indicated (or inferred with the `infer_Trange` function')\n\n    E_gt = get_all_E_gt_func(events_gt, Trange)\n    aff_partition = affiliation_partition(events_pred, E_gt)\n\n    # Computing precision distance\n    d_precision = [affiliation_precision_distance(Is, J) for Is, J in zip(aff_partition, events_gt)]\n    \n    # Computing recall distance\n    d_recall = [affiliation_recall_distance(Is, J) for Is, J in zip(aff_partition, events_gt)]\n\n    # Computing precision\n    p_precision = [affiliation_precision_proba(Is, J, E) for Is, J, E in zip(aff_partition, events_gt, E_gt)]\n\n    # Computing recall\n    p_recall = [affiliation_recall_proba(Is, J, E) for Is, J, E in zip(aff_partition, events_gt, E_gt)]\n\n    if _len_wo_nan(p_precision) > 0:\n        p_precision_average = _sum_wo_nan(p_precision) / _len_wo_nan(p_precision)\n    else:\n        p_precision_average = p_precision[0] # 
math.nan\n    p_recall_average = sum(p_recall) / len(p_recall)\n\n    dict_out = dict({'precision': p_precision_average,\n                     'recall': p_recall_average,\n                     'individual_precision_probabilities': p_precision,\n                     'individual_recall_probabilities': p_recall,\n                     'individual_precision_distances': d_precision,\n                     'individual_recall_distances': d_recall})\n    return(dict_out)\n\ndef produce_all_results():\n    \"\"\"\n    Produce the affiliation precision/recall for all files\n    contained in the `data` repository\n    :return: a dictionary indexed by data names, each containing a dictionary\n    indexed by algorithm names, each containing the results of the affiliation\n    metrics (precision, recall, individual probabilities and distances)\n    \"\"\"\n    datasets, Tranges = read_all_as_events() # read all the events in folder `data`\n    results = dict()\n    for data_name in datasets.keys():\n        results_data = dict()\n        for algo_name in datasets[data_name].keys():\n            if algo_name != 'groundtruth':\n                results_data[algo_name] = pr_from_events(datasets[data_name][algo_name],\n                                                         datasets[data_name]['groundtruth'],\n                                                         Tranges[data_name])\n        results[data_name] = results_data\n    return(results)\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/metrics/combine_all_scores.py",
    "content": "from f1_score_f1_pa import *\nfrom fc_score import *\nfrom precision_at_k import *\nfrom customizable_f1_score import *\nfrom AUC import *\nfrom Matthews_correlation_coefficient import *\nfrom affiliation.generics import convert_vector_to_events\nfrom affiliation.metrics import pr_from_events\nfrom vus.models.feature import Window\nfrom vus.metrics import get_range_vus_roc\n\n\n\ndef combine_all_evaluation_scores(y_test, pred_labels, anomaly_scores):\n    events_pred = convert_vector_to_events(y_test) # [(4, 5), (8, 9)]\n    events_gt = convert_vector_to_events(pred_labels)     # [(3, 4), (7, 10)]\n    Trange = (0, len(y_test))\n    affiliation = pr_from_events(events_pred, events_gt, Trange)\n    true_events = get_events(y_test)\n    _, _, _, f1_score_ori, f05_score_ori = get_accuracy_precision_recall_fscore(y_test, pred_labels)\n    f1_score_pa = get_point_adjust_scores(y_test, pred_labels, true_events)[5]\n    pa_accuracy, pa_precision, pa_recall, pa_f_score = get_adjust_F1PA(y_test, pred_labels)\n    range_f_score = customizable_f1_score(y_test, pred_labels)\n    _, _, f1_score_c = get_composite_fscore_raw(y_test, pred_labels,  true_events, return_prec_rec=True)\n    precision_k = precision_at_k(y_test, anomaly_scores, pred_labels)\n    point_auc = point_wise_AUC(pred_labels, y_test)\n    range_auc = Range_AUC(pred_labels, y_test)\n    MCC_score = MCC(y_test, pred_labels)\n    results = get_range_vus_roc(y_test, pred_labels, 100) # slidingWindow = 100 default\n\n    \n    score_list = {\"f1_score_ori\": f1_score_ori, \n                  \"f05_score_ori\" : f05_score_ori, \n                  \"f1_score_pa\": f1_score_pa,\n                  \"pa_accuracy\":pa_accuracy, \n                  \"pa_precision\":pa_precision, \n                  \"pa_recall\":pa_recall, \n                  \"pa_f_score\":pa_f_score,\n                  \"range_f_score\": range_f_score,\n                  \"f1_score_c\": f1_score_c, \n                  \"precision_k\": 
precision_k,\n                  \"point_auc\": point_auc,\n                  \"range_auc\": range_auc, \n                  \"MCC_score\":MCC_score, \n                  \"Affiliation precision\": affiliation['precision'], \n                  \"Affiliation recall\": affiliation['recall'],\n                  \"R_AUC_ROC\": results[\"R_AUC_ROC\"], \n                  \"R_AUC_PR\": results[\"R_AUC_PR\"],\n                  \"VUS_ROC\": results[\"VUS_ROC\"], \n                  \"VUS_PR\": results[\"VUS_PR\"]}\n    \n    return score_list\n\n\ndef main():\n    y_test = np.zeros(100)\n    y_test[10:20] = 1\n    y_test[50:60] = 1\n    pred_labels = np.zeros(100)\n    pred_labels[15:17] = 1\n    pred_labels[55:62] = 1\n    anomaly_scores = np.zeros(100)\n    anomaly_scores[15:17] = 0.7\n    anomaly_scores[55:62] = 0.6\n    pred_labels[51:55] = 1\n    true_events = get_events(y_test)\n    scores = combine_all_evaluation_scores(y_test, pred_labels, anomaly_scores)\n    # scores = test(y_test, pred_labels)\n    for key,value in scores.items():\n        print(key,' : ',value)\n\n    \nif __name__ == \"__main__\":\n    main()"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/metrics/customizable_f1_score.py",
    "content": "# used by paper: Exathlon: A Benchmark for Explainable Anomaly Detection over Time Series_VLDB 2021\n# github: https://github.com/exathlonbenchmark/exathlon\nimport numpy as np\nfrom other_anomaly_baselines.metrics.evaluate_utils import range_convers_new\n\n# the existence reward on the bias\ndef b(bias, i, length):\n    if bias == 'flat':\n        return 1\n    elif bias == 'front-end bias':\n        return length - i + 1\n    elif bias == 'back-end bias':\n        return i\n    else:\n        if i <= length / 2:\n            return i\n        else:\n            return length - i + 1\n\n\ndef w(AnomalyRange, p):\n    MyValue = 0\n    MaxValue = 0\n    start = AnomalyRange[0]\n    AnomalyLength = AnomalyRange[1] - AnomalyRange[0] + 1\n    # flat/'front-end bias'/'back-end bias'\n    bias = 'flat'\n    for i in range(start, start + AnomalyLength):\n        bi = b(bias, i, AnomalyLength)\n        MaxValue += bi\n        if i in p:\n            MyValue += bi\n    return MyValue / MaxValue\n\n\ndef Cardinality_factor(Anomolyrange, Prange):\n    score = 0\n    start = Anomolyrange[0]\n    end = Anomolyrange[1]\n    for i in Prange:\n        if start <= i[0] <= end:\n            score += 1\n        elif i[0] <= start <= i[1]:\n            score += 1\n        elif i[0] <= end <= i[1]:\n            score += 1\n        elif start >= i[0] and end <= i[1]:\n            score += 1\n    if score == 0:\n        return 0\n    else:\n        return 1 / score\n\n\ndef existence_reward(labels, preds):\n    '''\n    labels: list of ordered pair\n    preds predicted data\n    '''\n\n    score = 0\n    for i in labels:\n        if np.sum(np.multiply(preds <= i[1], preds >= i[0])) > 0:\n            score += 1\n    return score\n\n\ndef range_recall_new(labels, preds, alpha):\n    p = np.where(preds == 1)[0]  # positions of predicted label==1\n    range_pred = range_convers_new(preds)\n    range_label = range_convers_new(labels)\n\n    Nr = len(range_label)  # total # of 
real anomaly segments\n\n    ExistenceReward = existence_reward(range_label, p)\n\n    OverlapReward = 0\n    for i in range_label:\n        OverlapReward += w(i, p) * Cardinality_factor(i, range_pred)\n\n    score = alpha * ExistenceReward + (1 - alpha) * OverlapReward\n    if Nr != 0:\n        return score / Nr, ExistenceReward / Nr, OverlapReward / Nr\n    else:\n        return 0, 0, 0\n\n\ndef customizable_f1_score(y_test, pred_labels,  alpha=0.2):\n    label = y_test\n    preds = pred_labels\n    Rrecall, ExistenceReward, OverlapReward = range_recall_new(label, preds, alpha)\n    Rprecision = range_recall_new(preds, label, 0)[0]\n\n    if Rprecision + Rrecall == 0:\n        Rf = 0\n    else:\n        Rf = 2 * Rrecall * Rprecision / (Rprecision + Rrecall)\n    return Rf\n\n\ndef main():\n    y_test = np.zeros(100)\n    y_test[10:20] = 1\n    y_test[50:60] = 1\n    pred_labels = np.zeros(100)\n    pred_labels[15:19] = 1\n    pred_labels[55:62] = 1\n    # pred_labels[51:55] = 1\n    # true_events = get_events(y_test)\n    Rf = customizable_f1_score(y_test, pred_labels)\n    print(\"Rf: {}\".format(Rf))\n\n\nif __name__ == \"__main__\":\n    main()"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/metrics/evaluate_utils.py",
    "content": "import numpy as np\nfrom statsmodels.tsa.stattools import acf\nfrom scipy.signal import argrelextrema\n\n\ndef get_composite_fscore_from_scores(score_t_test, thres, true_events, prec_t, return_prec_rec=False):\n    pred_labels = score_t_test > thres\n    tp = np.sum([pred_labels[start:end + 1].any() for start, end in true_events.values()])\n    fn = len(true_events) - tp\n    rec_e = tp / (tp + fn)\n    fscore_c = 2 * rec_e * prec_t / (rec_e + prec_t)\n    if prec_t == 0 and rec_e == 0:\n        fscore_c = 0\n    if return_prec_rec:\n        return prec_t, rec_e, fscore_c\n    return fscore_c\n\n\nclass NptConfig:\n    def __init__(self, config_dict):\n        for k, v in config_dict.items():\n            setattr(self, k, v)\n\ndef find_length(data):\n    if len(data.shape) > 1:\n        return 0\n    data = data[:min(20000, len(data))]\n\n    base = 3\n    auto_corr = acf(data, nlags=400, fft=True)[base:]\n\n    local_max = argrelextrema(auto_corr, np.greater)[0]\n    try:\n        max_local_max = np.argmax([auto_corr[lcm] for lcm in local_max])\n        if local_max[max_local_max] < 3 or local_max[max_local_max] > 300:\n            return 125\n        return local_max[max_local_max] + base\n    except:\n        return 125\n\n\ndef range_convers_new(label):\n    '''\n    input: arrays of binary values\n    output: list of ordered pair [[a0,b0], [a1,b1]... ] of the inputs\n    '''\n    L = []\n    i = 0\n    j = 0\n    while j < len(label):\n        while label[i] == 0:\n            i += 1\n            if i >= len(label):\n                break\n        j = i + 1\n        if j >= len(label):\n            if j == len(label):\n                L.append((i, j - 1))\n            break\n        while label[j] != 0:\n            j += 1\n            if j >= len(label):\n                L.append((i, j - 1))\n                break\n        if j >= len(label):\n            break\n        L.append((i, j - 1))\n        i = j\n    return L"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/metrics/evaluator.py",
    "content": "import logging\nimport os\nimport pickle\nimport copy\nimport json\n\nimport numpy as np\nimport pandas as pd\n\nfrom logger_configs.configurations import datasets_config, default_thres_config\nfrom datasets.data_preprocess.dataset import get_events\nfrom logger_configs.logger import init_logging\nfrom src.evaluation.evaluation_utils import get_dataset_class, get_algo_class, get_chan_num, collect_eval_metrics, \\\n    combine_entities_eval_metrics, get_dynamic_scores, get_gaussian_kernel_scores\nfrom src.evaluation.evaluation_utils import fit_distributions, get_scores_channelwise\nfrom src.algorithms.algorithm_utils import load_torch_algo\nfrom src.evaluation.trainer import Trainer\n\n\ndef evaluate(saved_model_root, logger, thres_methods=[\"top_k_time\", \"best_f1_test\"], eval_root_cause=True,\n             point_adjust=False, eval_R_model=True, eval_dyn=False, thres_config=None,\n             telem_only=True, make_plots=[\"prc\", \"score_t\"], composite_best_f1=False):\n    seed = 42\n    saved_model_folders = os.listdir(saved_model_root)\n    saved_model_folders.sort()  # Sort directories in alphabetical order\n    plots_dir = os.path.join(saved_model_root, \"plots\")\n    os.makedirs(plots_dir, exist_ok=True)\n\n    # Initialize dictionary structure to collect results from each entity\n    algo_results = {\"hr_100_all\": [], \"hr_150_all\": [], \"rc_top3_all\": [], \"val_recons_err\": [], \"val_loss\": [],\n                    \"std_scores_train\": [], \"auroc\": [], \"avg_prec\": []}\n    eval_methods = [\"time-wise\"]\n    for thres_method in thres_methods + [\"tail_prob\", \"pot\"]:\n        algo_results[thres_method] = {\"hr_100_tp\": [], \"hr_150_tp\": [], \"rc_top3_tp\": [], \"opt_thres\": [],\n                                      \"fscore_comp\": [], \"rec_e\": []}\n        for eval_method in eval_methods:\n            algo_results[thres_method][eval_method] = {\"tp\": [], \"fp\": [], \"fn\": []}\n\n    algo_R_results = 
copy.deepcopy(algo_results)\n    telemanom_gauss_s_results = copy.deepcopy(algo_results)\n    algo_dyn_results = copy.deepcopy(algo_results)\n    algo_dyn_gauss_conv_results = copy.deepcopy(algo_results)\n    path_decomposition = os.path.normpath(saved_model_root).split(os.path.sep)\n    algo_name = path_decomposition[-3]\n    me_ds_name = path_decomposition[-4].split(\"_me\")[0]\n    ds_class = get_dataset_class(me_ds_name)\n\n    rca_possible = eval_root_cause\n    thres_config_dict = None\n\n    for folder_name in saved_model_folders:\n        if \".ini\" in folder_name or \".csv\" in folder_name or \".pdf\" in folder_name:\n            continue\n        elif os.path.split(folder_name)[-1] == \"plots\":\n            continue\n\n        # get dataset\n        entity = os.path.split(folder_name)[-1]\n        if me_ds_name in [\"msl\", \"smd\", \"smap\"]:\n            entity = entity.split(\"-\", 1)[1]\n        ds_init_params = {\"seed\": seed, \"entity\": entity}\n        if me_ds_name == \"swat-long\":\n            ds_init_params[\"shorten_long\"] = False\n        if me_ds_name == \"damadics-s\":\n            ds_init_params[\"drop_init_test\"] = True\n        dataset = ds_class(**ds_init_params)\n        plots_name = os.path.join(plots_dir, algo_name + \"_\" + me_ds_name + \"_\" + entity + \"_\")\n        # ds_name = dataset.name\n        logger.info(\"Processing Folder name: {}, {} on me dataset {}, entity {}\".format(folder_name, algo_name,\n                                                                                  me_ds_name, entity))\n        if thres_config is not None:\n            thres_config_dict = thres_config(me_ds_name)\n        else:\n            thres_config_dict = default_thres_config\n\n        # get test scores from pkl\n        raw_preds_file = os.path.join(saved_model_root, folder_name, \"raw_predictions\")\n        try:\n            with open(raw_preds_file, 'rb') as file:\n                preds = pickle.load(file)\n        except:\n    
        logger.info(\"The raw predictions of %s on %s weren't found, this run can't be evaluated\" % (algo_name,\n                                                                                                  me_ds_name))\n            return None\n\n        # Get the true labels\n        _, _, _, y_test = dataset.data()\n\n        true_events = get_events(y_test)\n\n        root_causes = None\n        if eval_root_cause:\n            root_causes = dataset.get_root_causes()\n        # Flag that indicates root cause identification evaluation is possible\n        rca_possible = eval_root_cause and (preds[\"score_tc_test\"] is not None or preds[\"error_tc_test\"] is not None) \\\n                       and root_causes is not None\n\n        # Load the predictions\n        score_t_test = preds[\"score_t_test\"]\n        score_tc_test = preds[\"score_tc_test\"]\n\n        error_tc_test = preds[\"error_tc_test\"]\n        error_t_test = preds[\"error_t_test\"]\n\n        score_t_train = preds[\"score_t_train\"]\n        score_tc_train = preds[\"score_tc_train\"]\n        error_tc_train = preds[\"error_tc_train\"]\n        error_t_train = preds[\"error_t_train\"]\n\n        recons_tc_train = preds[\"recons_tc_train\"]\n        recons_tc_test = preds[\"recons_tc_test\"]\n\n        try:\n            val_recons_err = np.nanmean(preds[\"val_recons_err\"])\n        except:\n            val_recons_err = None\n\n        try:\n            val_loss = preds[\"val_loss\"]\n        except:\n            val_loss = None\n\n        if telem_only and me_ds_name in [\"msl\", \"smap\"]:\n            if error_tc_train is not None:\n                error_t_train = error_tc_train[:, 0]\n                error_tc_train = None\n            if error_tc_test is not None:\n                error_t_test = error_tc_test[:, 0]\n                error_tc_test = None\n            if score_tc_test is not None:\n                score_t_test = score_tc_test[:, 0]\n                score_tc_test = None\n\n   
     eval_R = eval_R_model and (error_tc_test is not None or error_t_test is not None)\n        eval_dyn = eval_dyn and ((error_tc_test is not None) or\n                                 (error_t_test is not None))\n\n        # Evaluate on each entity\n        logger.info(\"Evaluating for score_t\")\n        algo_results = collect_eval_metrics(algo_results=algo_results, score_t_test=score_t_test, y_test=y_test,\n                                            thres_methods=thres_methods, logger=logger, true_events=true_events,\n                                            rca_possible=rca_possible and (preds[\"score_tc_test\"] is not None),\n                                            score_tc_test=score_tc_test,\n                                            root_causes=root_causes, score_t_train=score_t_train,\n                                            point_adjust=point_adjust, thres_config_dict=thres_config_dict,\n                                            eval_methods=eval_methods, make_plots=make_plots, dataset=dataset,\n                                            plots_name=plots_name + \"base\", composite_best_f1=composite_best_f1)\n\n        algo_results[\"val_recons_err\"].append(val_recons_err)\n        algo_results[\"val_loss\"].append(val_loss)\n        algo_results[\"std_scores_train\"].append(np.std(score_t_train))\n\n        if eval_R:\n            if algo_name == \"TelemanomAlgo\":\n                logger.info(\"Evaluating for static gaussian for TelemanomAlgo\")\n                # get static gaussian scores. 
This is usually done in the trainer, but not for this algo\n                distr_names = [\"univar_gaussian\"]\n                distr_par_file = os.path.join(saved_model_root, folder_name, \"distr_parameters\")\n                if error_t_train is None or error_tc_train is None:\n                    score_t_test_gauss_s = error_t_test\n                    score_t_train_gauss_s = None\n                    score_tc_test_gauss_s = error_tc_test\n                else:\n                    distr_params = fit_distributions(distr_par_file, distr_names, predictions_dic=\n                    {\"train_raw_scores\": error_tc_train})[distr_names[0]]\n                    score_t_train_gauss_s, _, score_t_test_gauss_s, score_tc_train_gauss_s, _, score_tc_test_gauss_s = \\\n                        get_scores_channelwise(distr_params, train_raw_scores=error_tc_train,\n                                               val_raw_scores=None, test_raw_scores=error_tc_test,\n                                               logcdf=True)\n\n                telemanom_gauss_s_results = collect_eval_metrics(algo_results=telemanom_gauss_s_results,\n                                                                 score_t_test=score_t_test_gauss_s,\n                                                                 y_test=y_test,\n                                                                 thres_methods=thres_methods,\n                                                                 logger=logger,\n                                                                 true_events=true_events,\n                                                                 rca_possible=rca_possible,\n                                                                 score_tc_test=score_tc_test_gauss_s,\n                                                                 root_causes=root_causes,\n                                                                 score_t_train=score_t_train_gauss_s,\n                          
                                       point_adjust=point_adjust,\n                                                                 thres_config_dict=thres_config_dict,\n                                                                 eval_methods=eval_methods,\n                                                                 make_plots=make_plots,\n                                                                 dataset=dataset,\n                                                                 plots_name=plots_name + \"-gauss-s\",\n                                                                 composite_best_f1=composite_best_f1)\n\n            if error_tc_train is not None and error_tc_test is not None:\n                logger.info(\"Doing mean adjustment of train and test error_tc\")\n                mean_c_train = np.mean(error_tc_train, axis=0)\n                error_tc_train_normed = error_tc_train - mean_c_train\n                error_tc_test_normed = error_tc_test - mean_c_train\n                error_t_train_normed = np.sqrt(np.mean(error_tc_train_normed ** 2, axis=1))\n                error_t_test_normed = np.sqrt(np.mean(error_tc_test_normed ** 2, axis=1))\n            else:\n                error_t_test_normed = error_t_test\n                error_t_train_normed = error_t_train\n                error_tc_test_normed = error_tc_test\n                error_tc_train_normed = None\n            logger.info(\"Evaluating for error_t\")\n            algo_R_results = collect_eval_metrics(algo_results=algo_R_results, score_t_test=error_t_test_normed,\n                                                  y_test=y_test,\n                                                  thres_methods=thres_methods, logger=logger, true_events=true_events,\n                                                  rca_possible=rca_possible, score_tc_test=error_tc_test_normed,\n                                                  root_causes=root_causes, score_t_train=error_t_train_normed,\n       
                                           point_adjust=point_adjust,\n                                                  thres_config_dict=thres_config_dict, eval_methods=eval_methods,\n                                                  make_plots=make_plots, dataset=dataset,\n                                                  plots_name=plots_name + \"R\",\n                                                  composite_best_f1=composite_best_f1,\n                                                  score_tc_train=error_tc_train_normed)\n            algo_R_results[\"val_recons_err\"].append(val_recons_err)\n            algo_R_results[\"val_loss\"].append(val_loss)\n            if error_t_train is not None:\n                algo_R_results[\"std_scores_train\"].append(np.std(error_t_train))\n\n            # dynamic scoring function\n        if eval_dyn:\n            # dyn_thres_methods = [\"best_f1_test\"]\n            dyn_thres_methods = thres_methods\n            logger.info(\"Evaluating gaussian dynamic scoring for error_t with thres_methods {}\".format(dyn_thres_methods))\n            long_window = thres_config_dict[\"dyn_gauss\"][\"long_window\"]\n            short_window = thres_config_dict[\"dyn_gauss\"][\"short_window\"]\n            if telem_only and me_ds_name in [\"msl\", \"smap\"]:\n                score_t_test_dyn, score_tc_test_dyn, score_t_train_dyn, score_tc_train_dyn = get_dynamic_scores(\n                    error_tc_train=None, error_tc_test=None, error_t_train=error_t_train, error_t_test=error_t_test,\n                    long_window=long_window, short_window=short_window)\n            else:\n                score_t_test_dyn, score_tc_test_dyn, score_t_train_dyn, score_tc_train_dyn = get_dynamic_scores(\n                    error_tc_train, error_tc_test, error_t_train, error_t_test, long_window=long_window,\n                    short_window=short_window)\n            algo_dyn_results = collect_eval_metrics(algo_results=algo_dyn_results, 
score_t_test=score_t_test_dyn,\n                                                    y_test=y_test, thres_methods=dyn_thres_methods,\n                                                    logger=logger, rca_possible=rca_possible, true_events=true_events,\n                                                    score_tc_test=score_tc_test_dyn, root_causes=root_causes,\n                                                    score_t_train=score_t_train_dyn, point_adjust=point_adjust,\n                                                    thres_config_dict=thres_config_dict, eval_methods=eval_methods,\n                                                    make_plots=make_plots, dataset=dataset,\n                                                    plots_name=plots_name + \"dyn\",\n                                                    composite_best_f1=composite_best_f1,\n                                                    score_tc_train=score_tc_train_dyn)\n            algo_dyn_results[\"val_recons_err\"].append(val_recons_err)\n            algo_dyn_results[\"val_loss\"].append(val_loss)\n            if score_t_train_dyn is not None:\n                algo_dyn_results[\"std_scores_train\"].append(np.std(score_t_train_dyn))\n\n            kernel_sigma = thres_config_dict[\"dyn_gauss\"][\"kernel_sigma\"]\n            score_t_test_dyn_gauss_conv, score_tc_test_dyn_gauss_conv = get_gaussian_kernel_scores(\n                score_t_test_dyn, score_tc_test_dyn, kernel_sigma)\n            if score_t_train_dyn is not None:\n                score_t_train_dyn_gauss_conv, _ = get_gaussian_kernel_scores(score_t_train_dyn, score_tc_train_dyn,\n                                                                             kernel_sigma)\n            else:\n                score_t_train_dyn_gauss_conv = None\n            algo_dyn_gauss_conv_results = collect_eval_metrics(algo_results=algo_dyn_gauss_conv_results,\n                                                               
score_t_test=score_t_test_dyn_gauss_conv,\n                                                               y_test=y_test,\n                                                               thres_methods=dyn_thres_methods,\n                                                               logger=logger,\n                                                               rca_possible=rca_possible,\n                                                               true_events=true_events,\n                                                               score_tc_test=score_tc_test_dyn_gauss_conv,\n                                                               root_causes=root_causes,\n                                                               score_t_train=score_t_train_dyn_gauss_conv,\n                                                               point_adjust=point_adjust,\n                                                               thres_config_dict=thres_config_dict,\n                                                               eval_methods=eval_methods,\n                                                               make_plots=make_plots, dataset=dataset,\n                                                               plots_name=plots_name + \"dyn-gauss-conv\",\n                                                               composite_best_f1=composite_best_f1,\n                                                               score_tc_train=None)\n\n    # Combine results from each entity\n    final_results, column_names = combine_entities_eval_metrics(algo_results, thres_methods, me_ds_name, algo_name,\n                                                                rca_possible, eval_methods=eval_methods)\n    if eval_R_model:\n        results_R, _ = combine_entities_eval_metrics(algo_R_results, thres_methods, me_ds_name, algo_name + \"-R\",\n                                                     rca_possible, eval_methods=eval_methods)\n        final_results = 
np.concatenate((final_results, results_R), axis=0)\n\n        if algo_name == \"TelemanomAlgo\":\n            results_telem, _ = combine_entities_eval_metrics(telemanom_gauss_s_results, thres_methods, me_ds_name,\n                                                             algo_name + \"-Gauss-S\", rca_possible, eval_methods=eval_methods)\n            final_results = np.concatenate((final_results, results_telem), axis=0)\n\n    if eval_dyn:\n        results_dyn, _ = combine_entities_eval_metrics(algo_dyn_results, dyn_thres_methods,\n                                                       me_ds_name, algo_name + \"-dyn\",\n                                                    rca_possible, eval_methods=eval_methods)\n        final_results = np.concatenate((final_results, results_dyn), axis=0)\n        results_dyn_gauss_conv, _ = combine_entities_eval_metrics(algo_dyn_gauss_conv_results,\n                                                                  dyn_thres_methods,\n                                                                  me_ds_name, algo_name + \"-dyn-gauss-conv\",\n                                                                  rca_possible, eval_methods=eval_methods)\n        final_results = np.concatenate((final_results, results_dyn_gauss_conv), axis=0)\n\n    results_df = pd.DataFrame(final_results, columns=column_names)\n    results_df[\"folder_name\"] = saved_model_root\n    new_col_order = list(results_df.columns)[:3] + [\"point_adjust\"] + list(results_df.columns)[3:]\n    results_df[\"point_adjust\"] = point_adjust\n    results_df = results_df[new_col_order]\n    with open(os.path.join(os.path.dirname(saved_model_root), \"config.json\")) as file:\n        algo_config = json.dumps(json.load(file))\n    results_df[\"config\"] = algo_config\n    if thres_config_dict is not None:\n        results_df[\"thres_config\"] = str(thres_config_dict)\n    if point_adjust:\n        filename = os.path.join(saved_model_root, \"results_point_adjust.csv\")\n  
  else:\n        filename = os.path.join(saved_model_root, \"results.csv\")\n\n    results_df.to_csv(filename, index=False)\n    logger.info(\"Saved results to {}\".format(filename))\n\n\ndef analyse_from_pkls(results_root:str, thres_methods=[\"best_f1_test\"], eval_root_cause=True, point_adjust=False,\n                      eval_R_model=True, eval_dyn=True, thres_config=None, logger=None,\n                      telem_only=True, filename_prefix=\"\", rerun_if_ds=None, process_seeds=None, make_plots=[],\n                      composite_best_f1=False):\n    \"\"\"\n    Function that reads saved predictions and evaluates them for anomaly detection and diagnosis under various\n    settings.\n    :param results_root: dir where predictions for the algo generated by the trainer in a specific folder structure.\n    :param thres_methods: list of thresholding methods with which to evaluate\n    :param eval_root_cause: Set it to True if root cause is desired and possible (i.e. if channel-wise scores are provided\n    in the predictions, else False\n    :param point_adjust: True if point-adjusted evaluation is desired.\n    :param eval_R_model: Corresponds to using Errors scoring function. Set it to True only if errors_t or errors_tc are\n    avaiable in the predictions. Pre-requisite to be True for eval_dyn.\n    :param eval_dyn: Set it to True if Gauss-D and Gauss-D-K scoring function evaluation is desired. Needs eval_R_model\n    to be True.\n    :param thres_config: A function that takes the dataset name as input and returns a dictionary corresponding to the\n    config for each method in thres_methods.\n    :param logger: for logging.\n    :param telem_only: Only affects the evaluation for MSL and SMAP datasets. If set to True, only the sensor channel,\n    i.e. first channel will be used in evaluation. 
If False, all channels - sensors and commands will be used.\n    :param filename_prefix: desired prefix on the filename.\n    :param process_seeds: specify a list if only some of the seeds need to be analyzed. If None, all seeds for which\n    results.csv doesn't exist will be (re)analyzed.\n    :param rerun_if_ds: set the names of specific datasets for which (re)analysis is required. Otherwise only datasets\n    for which results.csv doesn't exist will be (re)analyzed.\n    :param make_plots: specify which plots are desired. [\"prc\", \"score_t\"] are implemented.\n    :param composite_best_f1: if set to True, the \"best-f1\" threshold will be computed as \"best-fc1\" threshold.\n    :return: None. The evalution results are saved as results.csv for each run.\n    \"\"\"\n    seed = 42\n    result_df_list = []\n    ds_folders = os.listdir(results_root)\n\n    if point_adjust:\n        result_filename = \"results_point_adjust.csv\"\n    else:\n        result_filename = \"results.csv\"\n    if logger is None:\n        init_logging(os.path.join(results_root, 'logs'), prefix=\"eval\")\n        logger = logging.getLogger(__name__)\n    for ds_folder in ds_folders:\n        if ds_folder.endswith(\".csv\") or ds_folder == \"logs\" or \"thres_results\" in ds_folder:\n            continue\n        ds_path = os.path.join(results_root, ds_folder)\n        algo_folders = os.listdir(ds_path)\n        for algo_folder in algo_folders:\n            algo_path = os.path.join(ds_path, algo_folder)\n            config_folders = os.listdir(algo_path)\n            config_folders = [folder for folder in config_folders if not folder.endswith(\".csv\")]\n            for config_folder in config_folders:\n                config_path = os.path.join(algo_path, config_folder)\n                run_folders = os.listdir(config_path)\n                for run_folder in run_folders:\n                    if not run_folder.endswith(\".json\"):\n                        run_path = 
os.path.join(config_path, run_folder)\n                        current_seed = int(run_folder.split(\"-\", 2)[0])\n                        if process_seeds is not None:\n                            if current_seed not in process_seeds:\n                                continue\n                        if rerun_if_ds is not None:\n                            if (ds_folder in rerun_if_ds) or (rerun_if_ds == 'all'):\n                                if os.path.exists(os.path.join(run_path, result_filename)):\n                                    os.remove(os.path.join(run_path, result_filename))\n                        if not os.path.exists(os.path.join(run_path, result_filename)):\n                            entity_folders = os.listdir(run_path)\n                            skip_this_run = False  # Flag to indicate that evaluation for this run is impossible\n                            for entity_folder in entity_folders:\n                                if entity_folder != \"plots\" and \".csv\" not in entity_folder and entity_folder != \"logs\":\n                                    entity_path = os.path.join(run_path, entity_folder)\n                                    if not skip_this_run:\n                                        try:\n                                            with open(os.path.join(entity_path, \"raw_predictions\"), \"rb\") as file:\n                                                raw_predictions = pickle.load(file)\n                                            assert \"score_t_test\" in raw_predictions.keys()\n                                            if np.isnan(raw_predictions[\"score_t_test\"]).any():\n                                                skip_this_run = True\n                                        except:\n                                            me_ds_name = ds_folder.split(\"_me\")[0]\n                                            ds_class = get_dataset_class(me_ds_name)\n                                            
algo_class = get_algo_class(algo_folder)\n                                            entity_name = entity_folder.replace(\"smap-\", \"\").replace(\"msl-\", \"\").\\\n                                                replace(\"smd-\", \"\")\n                                            ds_init_params = {\"seed\": seed, \"entity\": entity_name}\n                                            if me_ds_name == \"swat-long\":\n                                                ds_init_params[\"shorten_long\"] = False\n                                            entity_ds = ds_class(**ds_init_params)\n                                            repredict = repredict_from_saved_model(entity_path, algo_class=algo_class,\n                                                                                   entity=entity_ds, logger=logger)\n                                            if not repredict:\n                                                logger.warning(\"Predictions and trained model couldn't be found, evaluation is \"\n                                                      \"impossible for run saved at %s\" % run_path)\n                                                skip_this_run = True\n                            if not skip_this_run:\n                                evaluate(run_path, thres_methods=thres_methods, eval_root_cause=eval_root_cause,\n                                         point_adjust=point_adjust, eval_R_model=eval_R_model, eval_dyn=eval_dyn,\n                                         thres_config=thres_config, logger=logger, telem_only=telem_only,\n                                         make_plots=make_plots, composite_best_f1=composite_best_f1)\n                        try:\n                            result_df = pd.read_csv(os.path.join(run_path, result_filename))\n                            if \"point_adjust\" not in result_df.columns:\n                                result_df[\"point_adjust\"] = False\n                            
result_df_list.append(result_df)\n                        except:\n                            logger.warning(\"Results table couldn't be found for run saved at %s\" % run_path)\n    overall_results = pd.concat(result_df_list, ignore_index=True)\n\n    overall_results.to_csv(os.path.join(results_root, filename_prefix+\"overall_\" + result_filename))\n\n\ndef repredict_from_saved_model(model_root, algo_class, entity, logger):\n    algo_config_filename = os.path.join(model_root, \"init_params\")\n    saved_model_filename = [os.path.join(model_root, filename) for filename in\n                            os.listdir(model_root) if \"trained_model\" in filename]\n    if len(saved_model_filename) == 1:\n        saved_model_filename = saved_model_filename[0]\n    else:\n        saved_model_filename.sort(key=get_chan_num)\n\n    additional_params_filename = os.path.join(model_root, \"additional_params\")\n    if len(additional_params_filename) == 1:\n        additional_params_filename = additional_params_filename[0]\n    try:\n        algo_reload = load_torch_algo(algo_class, algo_config_filename, saved_model_filename,\n                                      additional_params_filename, eval=True)\n        _ = Trainer.predict(algo_reload, entity, model_root, logger=logger)\n        return True\n    except Exception as e:\n        logger.warning(f\"An error occurred while loading saved algo and repredicting: {e}\")\n        return False\n\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/metrics/f1_score_f1_pa.py",
    "content": "import numpy as np\nfrom sklearn.metrics import precision_recall_curve, roc_curve, auc, roc_auc_score, precision_score, recall_score, \\\n    accuracy_score, fbeta_score, average_precision_score\n\n\n# function: calculate the point-adjust f-scores(whether top k)\ndef get_point_adjust_scores(y_test, pred_labels, true_events, thereshold_k=0, whether_top_k=False):\n    tp = 0\n    fn = 0\n    for true_event in true_events.keys():\n        true_start, true_end = true_events[true_event]\n        if whether_top_k is False:\n            if pred_labels[true_start:true_end].sum() > 0:\n                tp += (true_end - true_start)\n            else:\n                fn += (true_end - true_start)\n        else:\n            if pred_labels[true_start:true_end].sum() > thereshold_k:\n                tp += (true_end - true_start)\n            else:\n                fn += (true_end - true_start)\n    fp = np.sum(pred_labels) - np.sum(pred_labels * y_test)\n\n    prec, rec, fscore = get_prec_rec_fscore(tp, fp, fn)\n    return fp, fn, tp, prec, rec, fscore\n\ndef get_adjust_F1PA(pred, gt):\n    anomaly_state = False\n    for i in range(len(gt)):\n        if gt[i] == 1 and pred[i] == 1 and not anomaly_state:\n            anomaly_state = True\n            for j in range(i, 0, -1):\n                if gt[j] == 0:\n                    break\n                else:\n                    if pred[j] == 0:\n                        pred[j] = 1\n            for j in range(i, len(gt)):\n                if gt[j] == 0:\n                    break\n                else:\n                    if pred[j] == 0:\n                        pred[j] = 1\n        elif gt[i] == 0:\n            anomaly_state = False\n        if anomaly_state:\n            pred[i] = 1\n            \n    from sklearn.metrics import precision_recall_fscore_support\n    from sklearn.metrics import accuracy_score\n\n    accuracy = accuracy_score(gt, pred)\n    precision, recall, f_score, support = 
precision_recall_fscore_support(gt, pred,\n                                                                          average='binary')\n    return accuracy, precision, recall, f_score\n\n\n# calculate the point-adjusted f-score\ndef get_prec_rec_fscore(tp, fp, fn):\n    if tp == 0:\n        precision = 0\n        recall = 0\n    else:\n        precision = tp / (tp + fp)\n        recall = tp / (tp + fn)\n    fscore = get_f_score(precision, recall)\n    return precision, recall, fscore\n\n\ndef get_f_score(prec, rec):\n    if prec == 0 and rec == 0:\n        f_score = 0\n    else:\n        f_score = 2 * (prec * rec) / (prec + rec)\n    return f_score\n\n\n# function: calculate the normal edition f-scores\ndef get_accuracy_precision_recall_fscore(y_true: list, y_pred: list):\n    accuracy = accuracy_score(y_true, y_pred)\n    # warn_for=() avoids log warnings for any result being zero\n    # precision, recall, f_score, _ = prf(y_true, y_pred, average='binary', warn_for=())\n    precision = precision_score(y_true, y_pred)\n    recall = recall_score(y_true, y_pred)\n    f_score = (2 * precision * recall) / (precision + recall)\n    if precision == 0 and recall == 0:\n        f05_score = 0\n    else:\n        f05_score = fbeta_score(y_true, y_pred, average='binary', beta=0.5)\n    return accuracy, precision, recall, f_score, f05_score\n\n\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/metrics/f1_series.py",
    "content": "from fc_score import *\nfrom f1_score_f1_pa import *\nfrom evaluate_utils import *\n\ndefault_thres_config = {\"top_k_time\": {},\n                        \"best_f1_test\": {\"exact_pt_adj\": True},\n                        \"thresholded_score\": {},\n                        \"tail_prob\": {\"tail_prob\": 2},\n                        \"tail_prob_1\": {\"tail_prob\": 1},\n                        \"tail_prob_2\": {\"tail_prob\": 2},\n                        \"tail_prob_3\": {\"tail_prob\": 3},\n                        \"tail_prob_4\": {\"tail_prob\": 4},\n                        \"tail_prob_5\": {\"tail_prob\": 5},\n                        \"dyn_gauss\": {\"long_window\": 10000, \"short_window\": 1, \"kernel_sigma\": 10},\n                        \"nasa_npt\": {\"batch_size\": 70, \"window_size\": 30, \"telem_only\": True,\n                                     \"smoothing_perc\": 0.005, \"l_s\": 250, \"error_buffer\": 5, \"p\": 0.05}}\n\n\ndef threshold_and_predict(score_t_test, y_test, true_events, logger, test_anom_frac, thres_method=\"top_k_time\",\n                          point_adjust=False, score_t_train=None, thres_config_dict=dict(), return_auc=False,\n                          composite_best_f1=False):\n    if thres_method in thres_config_dict.keys():\n        config = thres_config_dict[thres_method]\n    else:\n        config = default_thres_config[thres_method]\n    # test_anom_frac = (np.sum(y_test)) / len(y_test)\n    auroc = None\n    avg_prec = None\n    if thres_method == \"thresholded_score\":\n        opt_thres = 0.5\n        if set(score_t_test) - {0, 1}:\n            logger.error(\"Score_t_test isn't binary. 
Predicting all as non-anomalous\")\n            pred_labels = np.zeros(len(score_t_test))\n        else:\n            pred_labels = score_t_test\n\n    elif thres_method == \"best_f1_test\" and point_adjust:\n        prec, rec, thresholds = precision_recall_curve(y_test, score_t_test, pos_label=1)\n        if not config[\"exact_pt_adj\"]:\n            fscore_best_time = [get_f_score(precision, recall) for precision, recall in zip(prec, rec)]\n            opt_num = np.squeeze(np.argmax(fscore_best_time))\n            opt_thres = thresholds[opt_num]\n            thresholds = np.random.choice(thresholds, size=5000) + [opt_thres]\n        fscores = []\n        for thres in thresholds:\n            _, _, _, _, _, fscore = get_point_adjust_scores(y_test, score_t_test > thres, true_events)\n            fscores.append(fscore)\n        opt_thres = thresholds[np.argmax(fscores)]\n        pred_labels = score_t_test > opt_thres\n\n    elif thres_method == \"best_f1_test\" and composite_best_f1:\n        prec, rec, thresholds = precision_recall_curve(y_test, score_t_test, pos_label=1)\n        precs_t = prec\n        fscores_c = [get_composite_fscore_from_scores(score_t_test, thres, true_events, prec_t) for thres, prec_t in\n                     zip(thresholds, precs_t)]\n        try:\n            opt_thres = thresholds[np.nanargmax(fscores_c)]\n        except:\n            opt_thres = 0.0\n        pred_labels = score_t_test > opt_thres\n\n    elif thres_method == \"top_k_time\":\n        opt_thres = np.nanpercentile(score_t_test, 100 * (1 - test_anom_frac), interpolation='higher')\n        pred_labels = np.where(score_t_test > opt_thres, 1, 0)\n\n    elif thres_method == \"best_f1_test\":\n        prec, rec, thres = precision_recall_curve(y_test, score_t_test, pos_label=1)\n        fscore = [get_f_score(precision, recall) for precision, recall in zip(prec, rec)]\n        opt_num = np.squeeze(np.argmax(fscore))\n        opt_thres = thres[opt_num]\n        pred_labels = 
np.where(score_t_test > opt_thres, 1, 0)\n\n    elif \"tail_prob\" in thres_method:\n        tail_neg_log_prob = config[\"tail_prob\"]\n        opt_thres = tail_neg_log_prob\n        pred_labels = np.where(score_t_test > opt_thres, 1, 0)\n\n    elif thres_method == \"nasa_npt\":\n        opt_thres = 0.5\n        pred_labels = get_npt_labels(score_t_test, y_test, config)\n    else:\n        logger.error(\"Thresholding method {} not in [top_k_time, best_f1_test, tail_prob]\".format(thres_method))\n        return None, None\n    if return_auc:\n        avg_prec = average_precision_score(y_test, score_t_test)\n        auroc = roc_auc_score(y_test, score_t_test)\n        return opt_thres, pred_labels, avg_prec, auroc\n    return opt_thres, pred_labels\n\n\n# most-top funcion\ndef evaluate_predicted_labels(pred_labels, y_test, true_events, logger, eval_method=\"time-wise\", breaks=[],\n                              point_adjust=False):\n    \"\"\"\n    Computes evaluation metrics for the binary classifications given the true and predicted labels\n    :param point_adjust: used to judge whether is pa\n    :param pred_labels: array of predicted labels\n    :param y_test: array of true labels\n    :param eval_method: string that indicates whether we evaluate the classification time point-wise or event-wise\n    :param breaks: array of discontinuities in the time series, relevant only if you look at event-wise\n    :param return_raw: Boolean that indicates whether we want to return tp, fp and fn or prec, recall and f1\n    :return: tuple of evaluation metrics\n    \"\"\"\n\n    if eval_method == \"time-wise\":\n        # point-adjust fscore\n        if point_adjust:\n            fp, fn, tp, prec, rec, fscore = get_point_adjust_scores(y_test, pred_labels, true_events)\n        # normal fscore\n        else:\n            _, prec, rec, fscore, _ = get_accuracy_precision_recall_fscore(y_test, pred_labels)\n            tp = np.sum(pred_labels * y_test)\n            fp = 
np.sum(pred_labels) - tp\n            fn = np.sum(y_test) - tp\n    # event-wise\n    else:\n        logger.error(\"Evaluation method {} not in [time-wise, event-wise]\".format(eval_method))\n        return 0, 0, 0\n\n    return tp, fp, fn, prec, rec, fscore\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/metrics/fc_score.py",
    "content": "import numpy as np\nfrom sklearn.metrics import precision_score\n\n\ndef get_events(y_test, outlier=1, normal=0):\n    events = dict()\n    label_prev = normal\n    event = 0  # corresponds to no event\n    event_start = 0\n    for tim, label in enumerate(y_test):\n        if label == outlier:\n            if label_prev == normal:\n                event += 1\n                event_start = tim\n        else:\n            if label_prev == outlier:\n                event_end = tim - 1\n                events[event] = (event_start, event_end)\n        label_prev = label\n\n    if label_prev == outlier:\n        event_end = tim - 1\n        events[event] = (event_start, event_end)\n    return events\n\n\ndef get_composite_fscore_raw(y_test, pred_labels,  true_events, return_prec_rec=False):\n    tp = np.sum([pred_labels[start:end + 1].any() for start, end in true_events.values()])\n    fn = len(true_events) - tp\n    rec_e = tp / (tp + fn)\n    prec_t = precision_score(y_test, pred_labels)\n    fscore_c = 2 * rec_e * prec_t / (rec_e + prec_t)\n    if prec_t == 0 and rec_e == 0:\n        fscore_c = 0\n    if return_prec_rec:\n        return prec_t, rec_e, fscore_c\n    return fscore_c\n\n\ndef main():\n    y_test = np.zeros(100)\n    y_test[10:20] = 1\n    y_test[50:60] = 1\n    pred_labels = np.zeros(100)\n    pred_labels[15:17] = 1\n    pred_labels[55:62] = 1\n    # pred_labels[51:55] = 1\n    # true_events = get_events(y_test)\n    prec_t, rec_e, fscore_c = get_composite_fscore_raw(pred_labels, y_test, return_prec_rec=True)\n#     print(\"Prec_t: {}, rec_e: {}, fscore_c: {}\".format(prec_t, rec_e, fscore_c))\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/metrics/metrics.py",
    "content": "from other_anomaly_baselines.metrics.f1_score_f1_pa import *\nfrom other_anomaly_baselines.metrics.fc_score import *\nfrom other_anomaly_baselines.metrics.precision_at_k import *\nfrom other_anomaly_baselines.metrics.customizable_f1_score import *\nfrom other_anomaly_baselines.metrics.AUC import *\nfrom other_anomaly_baselines.metrics.Matthews_correlation_coefficient import *\nfrom other_anomaly_baselines.metrics.affiliation.generics import convert_vector_to_events\nfrom other_anomaly_baselines.metrics.affiliation.metrics import pr_from_events\nfrom other_anomaly_baselines.metrics.vus.models.feature import Window\nfrom other_anomaly_baselines.metrics.vus.metrics import get_range_vus_roc\nimport numpy as np\n\ndef combine_all_evaluation_scores(y_test, pred_labels, anomaly_scores):\n    events_pred = convert_vector_to_events(y_test) \n    events_gt = convert_vector_to_events(pred_labels)\n    Trange = (0, len(y_test))\n    affiliation = pr_from_events(events_pred, events_gt, Trange)\n    true_events = get_events(y_test)\n    pa_accuracy, pa_precision, pa_recall, pa_f_score = get_adjust_F1PA(y_test, pred_labels)\n    MCC_score = MCC(y_test, pred_labels)\n    vus_results = get_range_vus_roc(y_test, pred_labels, 100) # default slidingWindow = 100\n    \n    score_list_simple = {\n                  \"pa_accuracy\":pa_accuracy, \n                  \"pa_precision\":pa_precision, \n                  \"pa_recall\":pa_recall, \n                  \"pa_f_score\":pa_f_score,\n                  \"MCC_score\":MCC_score, \n                  \"Affiliation precision\": affiliation['precision'], \n                  \"Affiliation recall\": affiliation['recall'],\n                  \"R_AUC_ROC\": vus_results[\"R_AUC_ROC\"], \n                  \"R_AUC_PR\": vus_results[\"R_AUC_PR\"],\n                  \"VUS_ROC\": vus_results[\"VUS_ROC\"],\n                  \"VUS_PR\": vus_results[\"VUS_PR\"]\n                  }\n    \n    # return score_list, score_list_simple\n    
return score_list_simple\n\n\nif __name__ == '__main__':\n    y_test = np.load(\"data/events_pred_MSL.npy\")+0\n    pred_labels = np.load(\"data/events_gt_MSL.npy\")+0\n    anomaly_scores = np.load(\"data/events_scores_MSL.npy\")\n    print(len(y_test), max(anomaly_scores), min(anomaly_scores))\n    score_list_simple = combine_all_evaluation_scores(y_test, pred_labels, anomaly_scores)\n\n    for key, value in score_list_simple.items():\n        print('{0:21} :{1:10f}'.format(key, value))"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/metrics/precision_at_k.py",
    "content": "# k is defined as the number of anomalies\n# only calculate the range top k not the whole set\nimport numpy as np\n\n\ndef precision_at_k(y_test, score_t_test, pred_labels):\n    # top-k\n    k = int(np.sum(y_test))\n    threshold = np.percentile(score_t_test, 100 * (1 - k / len(y_test)))\n\n    # precision_at_k = metrics.top_k_accuracy_score(label, score, k)\n    p_at_k = np.where(pred_labels > threshold)[0]\n    TP_at_k = sum(y_test[p_at_k])\n    precision_at_k = TP_at_k / k\n    return precision_at_k\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/metrics/vus/analysis/robustness_eval.py",
    "content": "from random import shuffle\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nimport pandas as pd\nfrom tqdm import tqdm as tqdm\nimport time\nfrom sklearn.preprocessing import MinMaxScaler\nimport random\n\n\nimport os\nimport sys\nmodule_path = os.path.abspath(os.path.join('../..'))\nif module_path not in sys.path:\n    sys.path.append(module_path)\n\nfrom other_anomaly_baselines.metrics.vus.utils.slidingWindows import find_length\nfrom other_anomaly_baselines.metrics.vus.utils.metrics import metricor\n\nfrom other_anomaly_baselines.metrics.vus.models.distance import Fourier\nfrom other_anomaly_baselines.metrics.vus.models.feature import Window\n\n\ndef generate_new_label(label,lag):\n    if lag < 0:\n        return np.array(list(label[-lag:]) + [0]*(-lag))\n    elif lag > 0:\n        return np.array([0]*lag + list(label[:-lag]))\n    elif lag == 0:\n        return label\n\ndef compute_anomaly_acc_lag(methods_scores,label,slidingWindow,methods_keys):\n    \n    lag_range = list(range(-slidingWindow//4,slidingWindow//4,5))\n    methods_acc = {}\n    for i,methods_score in enumerate(tqdm(methods_keys)):\n        dict_acc = {\n            'R_AUC_ROC':      [],\n            'AUC_ROC':        [],\n            'R_AUC_PR':       [],\n            'AUC_PR':         [],\n            'VUS_ROC':        [],\n            'VUS_PR':         [],\n            'Precision':      [],\n            'Recall':         [],\n            'F':              [],\n            'ExistenceReward':[],\n            'OverlapReward':  [],\n            'Precision@k':    [],\n            'Rprecision':     [],\n            'Rrecall':        [],\n            'RF':             []}\n        \n        for lag in tqdm(lag_range):\n            new_label = generate_new_label(label,lag)\n            \n            grader = metricor()  \n\n            R_AUC, R_AP, R_fpr, R_tpr, R_prec = grader.RangeAUC(labels=new_label, score=methods_scores[methods_score], 
window=slidingWindow, plot_ROC=True) \n            L, fpr, tpr= grader.metric_new(new_label, methods_scores[methods_score], plot_ROC=True)\n            precision, recall, AP = grader.metric_PR(new_label, methods_scores[methods_score])  \n            Y, Z, X, X_ap, W, Z_ap,avg_auc_3d, avg_ap_3d = generate_curve(new_label,methods_scores[methods_score],2*slidingWindow)\n            L1 = [ elem for elem in L]\n\n            dict_acc['R_AUC_ROC']      +=[R_AUC]\n            dict_acc['AUC_ROC']        +=[L1[0]]\n            dict_acc['R_AUC_PR']       +=[R_AP]\n            dict_acc['AUC_PR']         +=[AP]\n            dict_acc['VUS_ROC']        +=[avg_auc_3d]\n            dict_acc['VUS_PR']         +=[avg_ap_3d]\n            dict_acc['Precision']      +=[L1[1]]\n            dict_acc['Recall']         +=[L1[2]]\n            dict_acc['F']              +=[L1[3]]\n            dict_acc['ExistenceReward']+=[L1[5]]\n            dict_acc['OverlapReward']  +=[L1[6]]\n            dict_acc['Precision@k']    +=[L1[9]]\n            dict_acc['Rprecision']     +=[L1[7]]\n            dict_acc['Rrecall']        +=[L1[4]]\n            dict_acc['RF']             +=[L1[8]]\n\n        methods_acc[methods_score] = dict_acc\n    return methods_acc\n\n\ndef compute_anomaly_acc_percentage(methods_scores,label,slidingWindow,methods_keys,pos_first_anom):\n    \n    \n    list_pos = []\n    step_a = max(0,(len(label) - pos_first_anom-200))//20\n    step_b = max(0,pos_first_anom-200)//20\n    pos_a = min(len(label),pos_first_anom + 200)\n    pos_b = max(0,pos_first_anom - 200)\n    list_pos.append((pos_b,pos_a))\n    for pos_iter in range(20):\n        pos_a = min(len(label),pos_a + step_a)\n        pos_b = max(0,pos_b - step_b)\n        list_pos.append((pos_b,pos_a))\n    methods_acc = {}\n    print(list_pos)\n    for i,methods_score in enumerate(tqdm(methods_keys)):\n        dict_acc = {\n            'R_AUC_ROC':      [],\n            'AUC_ROC':        [],\n            'R_AUC_PR':       [],\n      
      'AUC_PR':         [],\n            'VUS_ROC':        [],\n            'VUS_PR':         [],\n            'Precision':      [],\n            'Recall':         [],\n            'F':              [],\n            'ExistenceReward':[],\n            'OverlapReward':  [],\n            'Precision@k':    [],\n            'Rprecision':     [],\n            'Rrecall':        [],\n            'RF':             []}\n        \n        for end_pos in tqdm(list_pos):\n            new_label = label[end_pos[0]:end_pos[1]]\n            new_score = np.array(methods_scores[methods_score])[end_pos[0]:end_pos[1]]\n            grader = metricor()  \n\n            R_AUC, R_AP, R_fpr, R_tpr, R_prec = grader.RangeAUC(labels=new_label, score=new_score, window=slidingWindow, plot_ROC=True) \n            L, fpr, tpr= grader.metric_new(new_label, new_score, plot_ROC=True)\n            precision, recall, AP = grader.metric_PR(new_label, new_score)  \n            Y, Z, X, X_ap, W, Z_ap,avg_auc_3d, avg_ap_3d = generate_curve(new_label,new_score,2*slidingWindow)\n            L1 = [ elem for elem in L]\n\n            dict_acc['R_AUC_ROC']      +=[R_AUC]\n            dict_acc['AUC_ROC']        +=[L1[0]]\n            dict_acc['R_AUC_PR']       +=[R_AP]\n            dict_acc['AUC_PR']         +=[AP]\n            dict_acc['VUS_ROC']        +=[avg_auc_3d]\n            dict_acc['VUS_PR']         +=[avg_ap_3d]\n            dict_acc['Precision']      +=[L1[1]]\n            dict_acc['Recall']         +=[L1[2]]\n            dict_acc['F']              +=[L1[3]]\n            dict_acc['ExistenceReward']+=[L1[5]]\n            dict_acc['OverlapReward']  +=[L1[6]]\n            dict_acc['Precision@k']    +=[L1[9]]\n            dict_acc['Rprecision']     +=[L1[7]]\n            dict_acc['Rrecall']        +=[L1[4]]\n            dict_acc['RF']             +=[L1[8]]\n\n        methods_acc[methods_score] = dict_acc\n    return methods_acc\n\ndef 
compute_anomaly_acc_noise(methods_scores,label,slidingWindow,methods_keys):\n    \n    lag_range = list(range(-slidingWindow//2,slidingWindow//2,10))\n    methods_acc = {}\n    for i,methods_score in enumerate(tqdm(methods_keys)):\n        dict_acc = {\n            'R_AUC_ROC':      [],\n            'AUC_ROC':        [],\n            'R_AUC_PR':       [],\n            'AUC_PR':         [],\n            'VUS_ROC':        [],\n            'VUS_PR':         [],\n            'Precision':      [],\n            'Recall':         [],\n            'F':              [],\n            'ExistenceReward':[],\n            'OverlapReward':  [],\n            'Precision@k':    [],\n            'Rprecision':     [],\n            'Rrecall':        [],\n            'RF':             []}\n        \n        for lag in tqdm(lag_range):\n            new_label = label\n            \n            grader = metricor()  \n\n            noise = np.random.normal(-0.1,0.1,len(methods_scores[methods_score]))\n            \n            new_score = np.array(methods_scores[methods_score]) + noise\n            new_score = (new_score - min(new_score))/(max(new_score) - min(new_score))\n            \n            R_AUC, R_AP, R_fpr, R_tpr, R_prec = grader.RangeAUC(labels=new_label, score=new_score, window=slidingWindow, plot_ROC=True) \n            L, fpr, tpr= grader.metric_new(new_label, new_score, plot_ROC=True)\n            precision, recall, AP = grader.metric_PR(new_label, new_score)  \n            Y, Z, X, X_ap, W, Z_ap,avg_auc_3d, avg_ap_3d = generate_curve(new_label,new_score,2*slidingWindow)\n            L1 = [ elem for elem in L]\n\n            dict_acc['R_AUC_ROC']      +=[R_AUC]\n            dict_acc['AUC_ROC']        +=[L1[0]]\n            dict_acc['R_AUC_PR']       +=[R_AP]\n            dict_acc['AUC_PR']         +=[AP]\n            dict_acc['VUS_ROC']        +=[avg_auc_3d]\n            dict_acc['VUS_PR']         +=[avg_ap_3d]\n            dict_acc['Precision']      +=[L1[1]]\n            
dict_acc['Recall']         +=[L1[2]]\n            dict_acc['F']              +=[L1[3]]\n            dict_acc['ExistenceReward']+=[L1[5]]\n            dict_acc['OverlapReward']  +=[L1[6]]\n            dict_acc['Precision@k']    +=[L1[9]]\n            dict_acc['Rprecision']     +=[L1[7]]\n            dict_acc['Rrecall']        +=[L1[4]]\n            dict_acc['RF']             +=[L1[8]]\n\n        methods_acc[methods_score] = dict_acc\n    return methods_acc\n\n\ndef compute_anomaly_acc_pairwise(methods_scores,label,slidingWindow,method1,method2):\n    \n    lag_range = list(range(-slidingWindow//4,slidingWindow//4,5))\n    methods_acc = {}\n    method_key = [method1]\n    if method2 is not None:\n        method_key = [method1,method2]\n    for i,methods_score in enumerate(tqdm(method_key)):\n        dict_acc = {\n            'R_AUC_ROC':      [],\n            'AUC_ROC':        [],\n            'R_AUC_PR':       [],\n            'AUC_PR':         [],\n            'VUS_ROC':        [],\n            'VUS_PR':         [],\n            'Precision':      [],\n            'Recall':         [],\n            'F':              [],\n            'ExistenceReward':[],\n            'OverlapReward':  [],\n            'Precision@k':    [],\n            'Rprecision':     [],\n            'Rrecall':        [],\n            'RF':             []}\n        \n        for lag in tqdm(range(60)):\n            new_lag = random.randint(-slidingWindow//4,slidingWindow//4)\n            new_label = generate_new_label(label,new_lag)\n            \n            noise = np.random.normal(-0.1,0.1,len(methods_scores[methods_score]))\n            new_score = np.array(methods_scores[methods_score]) + noise\n            new_score = (new_score - min(new_score))/(max(new_score) - min(new_score))\n            \n            grader = metricor()  \n\n            R_AUC, R_AP, R_fpr, R_tpr, R_prec = grader.RangeAUC(labels=new_label, score=new_score, window=slidingWindow, plot_ROC=True) \n            L, fpr, tpr= 
grader.metric_new(new_label, new_score, plot_ROC=True)\n            precision, recall, AP = grader.metric_PR(new_label, new_score)  \n            #range_anomaly = grader.range_convers_new(new_label)\n            Y, Z, X, X_ap, W, Z_ap,avg_auc_3d, avg_ap_3d = generate_curve(new_label,new_score,2*slidingWindow)\n            L1 = [ elem for elem in L]\n\n            dict_acc['R_AUC_ROC']      +=[R_AUC]\n            dict_acc['AUC_ROC']        +=[L1[0]]\n            dict_acc['R_AUC_PR']       +=[R_AP]\n            dict_acc['AUC_PR']         +=[AP]\n            dict_acc['VUS_ROC']        +=[avg_auc_3d]\n            dict_acc['VUS_PR']         +=[avg_ap_3d]\n            dict_acc['Precision']      +=[L1[1]]\n            dict_acc['Recall']         +=[L1[2]]\n            dict_acc['F']              +=[L1[3]]\n            dict_acc['ExistenceReward']+=[L1[5]]\n            dict_acc['OverlapReward']  +=[L1[6]]\n            dict_acc['Precision@k']    +=[L1[9]]\n            dict_acc['Rprecision']     +=[L1[7]]\n            dict_acc['Rrecall']        +=[L1[4]]\n            dict_acc['RF']             +=[L1[8]]\n\n        methods_acc[methods_score] = dict_acc\n    return methods_acc\n\n\ndef normalize_dict_exp(methods_acc_lag,methods_keys):\n    key_metrics = [\n        'VUS_ROC',\n        'VUS_PR',\n        'R_AUC_ROC',\n        'R_AUC_PR',\n        'AUC_ROC',\n        'AUC_PR',\n        'Rprecision',\n        'Rrecall',\n        'RF',\n        'Precision',\n        'Recall',\n        'F',\n        'Precision@k'\n    ][::-1]\n    \n    norm_methods_acc_lag = {}\n    for key in methods_keys:\n        norm_methods_acc_lag[key] = {}\n        for key_metric in key_metrics:\n            ts = methods_acc_lag[key][key_metric]\n            new_ts = list(np.array(ts) -  np.mean(ts))\n            norm_methods_acc_lag[key][key_metric] = new_ts\n    return norm_methods_acc_lag\n        \ndef group_dict(methods_acc_lag,methods_keys):\n    key_metrics = [\n        'VUS_ROC',\n        'VUS_PR',\n    
    'R_AUC_ROC',\n        'R_AUC_PR',\n        'AUC_ROC',\n        'AUC_PR',\n        'Rprecision',\n        'Rrecall',\n        'RF',\n        'Precision',\n        'Recall',\n        'F',\n        'Precision@k'\n    ][::-1]\n    \n    norm_methods_acc_lag = {key:[] for key in key_metrics}\n    for key in methods_keys:\n        for key_metric in key_metrics:\n            ts = list(methods_acc_lag[key][key_metric])\n            new_ts = list(np.array(ts) -  np.mean(ts))\n            norm_methods_acc_lag[key_metric] += new_ts\n    return norm_methods_acc_lag\n\n\ndef generate_curve(label,score,slidingWindow):\n    tpr_3d, fpr_3d, prec_3d, window_3d, avg_auc_3d, avg_ap_3d = metricor().RangeAUC_volume(labels_original=label, score=score, windowSize=1*slidingWindow)\n\n    X = np.array(tpr_3d).reshape(1,-1).ravel()\n    X_ap = np.array(tpr_3d)[:,:-1].reshape(1,-1).ravel()\n    Y = np.array(fpr_3d).reshape(1,-1).ravel()\n    W = np.array(prec_3d).reshape(1,-1).ravel()\n    Z = np.repeat(window_3d, len(tpr_3d[0]))\n    Z_ap = np.repeat(window_3d, len(tpr_3d[0])-1)\n    \n    return Y, Z, X, X_ap, W, Z_ap,avg_auc_3d, avg_ap_3d\n\ndef box_plot(data, edge_color, fill_color):\n    bp = ax.boxplot(data, patch_artist=True)\n    \n    for element in ['boxes', 'whiskers', 'fliers', 'means', 'medians', 'caps']:\n        plt.setp(bp[element], color=edge_color)\n\n    for patch in bp['boxes']:\n        patch.set(facecolor=fill_color)       \n        \n    return bp\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/metrics/vus/analysis/score_computation.py",
    "content": "\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nimport pandas as pd\nfrom tqdm import tqdm as tqdm\nimport time\nfrom sklearn.preprocessing import MinMaxScaler\nimport random\n\n\nimport os\nimport sys\nmodule_path = os.path.abspath(os.path.join('../..'))\nif module_path not in sys.path:\n    sys.path.append(module_path)\n\nfrom metrics.vus.utils.slidingWindows import find_length\nfrom metrics.vus.utils.metrics import metricor\n\nfrom metrics.vus.models.distance import Fourier\nfrom metrics.vus.models.feature import Window\nfrom metrics.vus.models.cnn import cnn\nfrom metrics.vus.models.AE_mlp2 import AE_MLP2\nfrom metrics.vus.models.lstm import lstm\nfrom metrics.vus.models.ocsvm import OCSVM\nfrom metrics.vus.models.poly import POLY\nfrom metrics.vus.models.pca import PCA\nfrom metrics.vus.models.norma import NORMA\nfrom metrics.vus.models.matrix_profile import MatrixProfile\nfrom metrics.vus.models.lof import LOF\nfrom metrics.vus.models.iforest import IForest\n\ndef find_section_length(label,length):\n    best_i = None\n    best_sum = None\n    current_subseq = False\n    for i in range(len(label)):\n        changed = False\n        if label[i] == 1:\n            if current_subseq == False:\n                current_subseq = True\n                if best_i is None:\n                    changed = True\n                    best_i = i\n                    best_sum = np.sum(label[max(0,i-200):min(len(label),i+9800)])\n                else:\n                    if np.sum(label[max(0,i-200):min(len(label),i+9800)]) < best_sum:\n                        changed = True\n                        best_i = i\n                        best_sum = np.sum(label[max(0,i-200):min(len(label),i+9800)])\n                    else:\n                        changed = False\n                if changed:\n                    diff = i+9800 - len(label)\n\n                    pos1 = max(0,i-200 - max(0,diff))\n                    
pos2 = min(i+9800,len(label))\n        else:\n            current_subseq = False\n    if best_i is not None:\n        return best_i-pos1,(pos1,pos2)\n    else:\n        return None,None\n\ndef generate_data(filepath,init_pos,max_length):\n    \n    df = pd.read_csv(filepath, header=None).to_numpy()\n    name = filepath.split('/')[-1]\n    #max_length = 30000\n    data = df[init_pos:init_pos+max_length,0].astype(float)\n    label = df[init_pos:init_pos+max_length,1]\n    \n    pos_first_anom,pos = find_section_length(label,max_length)\n    \n    data = df[pos[0]:pos[1],0].astype(float)\n    label = df[pos[0]:pos[1],1]\n    \n    slidingWindow = find_length(data)\n    #slidingWindow = 70\n    X_data = Window(window = slidingWindow).convert(data).to_numpy()\n\n    data_train = data[:int(0.1*len(data))]\n    data_test = data\n\n    X_train = Window(window = slidingWindow).convert(data_train).to_numpy()\n    X_test = Window(window = slidingWindow).convert(data_test).to_numpy()\n    \n    return pos_first_anom,slidingWindow,data,X_data,data_train,data_test,X_train,X_test,label\n\ndef compute_score(methods,slidingWindow,data,X_data,data_train,data_test,X_train,X_test):\n    \n    methods_scores = {}\n    for method in methods:\n        start_time = time.time()\n        if method == 'IForest':\n            clf = IForest(n_jobs=1)\n            x = X_data\n            clf.fit(x)\n            score = clf.decision_scores_\n            score = MinMaxScaler(feature_range=(0,1)).fit_transform(score.reshape(-1,1)).ravel()\n            score = np.array([score[0]]*math.ceil((slidingWindow-1)/2) + list(score) + [score[-1]]*((slidingWindow-1)//2))\n\n        elif method == 'LOF':\n            clf = LOF(n_neighbors=20, n_jobs=1)\n            x = X_data\n            clf.fit(x)\n            score = clf.decision_scores_\n            score = MinMaxScaler(feature_range=(0,1)).fit_transform(score.reshape(-1,1)).ravel()\n            score = np.array([score[0]]*math.ceil((slidingWindow-1)/2) + 
list(score) + [score[-1]]*((slidingWindow-1)//2))\n\n        elif method == 'MatrixProfile':\n            clf = MatrixProfile(window = slidingWindow)\n            x = data\n            clf.fit(x)\n            score = clf.decision_scores_\n            score = MinMaxScaler(feature_range=(0,1)).fit_transform(score.reshape(-1,1)).ravel()\n            score = np.array([score[0]]*math.ceil((slidingWindow-1)/2) + list(score) + [score[-1]]*((slidingWindow-1)//2))\n\n        elif method == 'NormA':\n            clf = NORMA(pattern_length = slidingWindow, nm_size=3*slidingWindow)\n            x = data\n            clf.fit(x)\n            score = clf.decision_scores_\n            score = MinMaxScaler(feature_range=(0,1)).fit_transform(score.reshape(-1,1)).ravel()\n            score = np.array([score[0]]*((slidingWindow-1)//2) + list(score) + [score[-1]]*((slidingWindow-1)//2))\n\n        elif method == 'PCA':\n            clf = PCA()\n            x = X_data\n            clf.fit(x)\n            score = clf.decision_scores_\n            score = MinMaxScaler(feature_range=(0,1)).fit_transform(score.reshape(-1,1)).ravel()\n            score = np.array([score[0]]*math.ceil((slidingWindow-1)/2) + list(score) + [score[-1]]*((slidingWindow-1)//2))\n\n        elif method == 'POLY':\n            clf = POLY(power=3, window = slidingWindow)\n            x = data\n            clf.fit(x)\n            measure = Fourier()\n            measure.detector = clf\n            measure.set_param()\n            clf.decision_function(measure=measure)\n            score = clf.decision_scores_\n            score = MinMaxScaler(feature_range=(0,1)).fit_transform(score.reshape(-1,1)).ravel()\n\n        elif method == 'OCSVM':\n            X_train_ = MinMaxScaler(feature_range=(0,1)).fit_transform(X_train.T).T\n            X_test_ = MinMaxScaler(feature_range=(0,1)).fit_transform(X_test.T).T\n            clf = OCSVM(nu=0.05)\n            clf.fit(X_train_, X_test_)\n            score = 
clf.decision_scores_\n            score = np.array([score[0]]*math.ceil((slidingWindow-1)/2) + list(score) + [score[-1]]*((slidingWindow-1)//2))\n            score = MinMaxScaler(feature_range=(0,1)).fit_transform(score.reshape(-1,1)).ravel()\n\n        elif method == 'LSTM':\n            clf = lstm(slidingwindow = slidingWindow, predict_time_steps=1, epochs = 50, patience = 5, verbose=0)\n            clf.fit(data_train, data_test)\n            measure = Fourier()\n            measure.detector = clf\n            measure.set_param()\n            clf.decision_function(measure=measure)\n            score = clf.decision_scores_\n            score = MinMaxScaler(feature_range=(0,1)).fit_transform(score.reshape(-1,1)).ravel()\n\n        elif method == 'AE':\n            clf = AE_MLP2(slidingWindow = slidingWindow, epochs=100, verbose=0)\n            clf.fit(data_train, data_test)\n            score = clf.decision_scores_\n            score = MinMaxScaler(feature_range=(0,1)).fit_transform(score.reshape(-1,1)).ravel()\n\n        elif method == 'CNN':\n            clf = cnn(slidingwindow = slidingWindow, predict_time_steps=1, epochs = 100, patience = 5, verbose=0)\n            clf.fit(data_train, data_test)\n            measure = Fourier()\n            measure.detector = clf\n            measure.set_param()\n            clf.decision_function(measure=measure)\n            score = clf.decision_scores_\n            score = MinMaxScaler(feature_range=(0,1)).fit_transform(score.reshape(-1,1)).ravel()\n\n        #end_time = time.time()\n        #time_exec = end_time - start_time\n        #print(method,\"\\t time: {}\".format(time_exec))\n        methods_scores[method] = score\n        \n    return methods_scores\n\n\n\n\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/metrics/vus/metrics.py",
    "content": "from .utils.metrics import metricor\nfrom .analysis.robustness_eval import generate_curve\n\n\ndef get_range_vus_roc(score, labels, slidingWindow):\n    grader = metricor()\n    R_AUC_ROC, R_AUC_PR, _, _, _ = grader.RangeAUC(labels=labels, score=score, window=slidingWindow, plot_ROC=True)\n    _, _, _, _, _, _,VUS_ROC, VUS_PR = generate_curve(labels, score, 2*slidingWindow)\n    metrics = {'R_AUC_ROC': R_AUC_ROC, 'R_AUC_PR': R_AUC_PR, 'VUS_ROC': VUS_ROC, 'VUS_PR': VUS_PR}\n\n    return metrics\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/metrics/vus/models/distance.py",
    "content": "# -*- coding: utf-8 -*-\n\"\"\"Classes of distance measure for model type A\n\"\"\"\n\nimport numpy as np\n# import matplotlib.pyplot as plt\n# import random\nfrom arch import arch_model\n# import pandas as pd\nimport math\n# import pmdarima as pm\n# from pmdarima import model_selection\n# import os\n# import dis\n# import statistics\n# from sklearn import metrics\n# import sklearn\n\n\nclass Euclidean:\n    \"\"\" The function class for Lp euclidean norm\n    ----------\n    Power : int, optional (default=1)\n        The power of the lp norm. For power = k, the measure is calculagted by |x - y|_k\n    neighborhood : int, optional (default=max (100, 10*window size))\n        The length of neighborhood to derivete the normalizing constant D which is based on\n        the difference of maximum and minimum in the neighborhood minus window. \n    window: int, optional (default = length of input data)\n        The length of the subsequence to be compaired\n    Attributes\n    ----------\n    decision_scores_ : numpy array of shape (n_samples,)\n        The outlier scores of the training data.\n        The higher, the more abnormal. Outliers tend to have higher\n        scores. 
This value is available once the detector is\n        fitted.\n    detector: Object classifier\n        the anomaly detector that is used\n    \"\"\"\n    def __init__(self, power = 1, neighborhood = 100, window = 20, norm = False):\n        self.power = power\n        self.window = window\n        self.neighborhood = neighborhood\n        self.detector = None\n        self.decision_scores_  = []\n        self.norm = norm\n        self.X_train = 2\n    def measure(self, X, Y, index):\n        \"\"\"Derive the decision score based on the given distance measure \n        Parameters\n        ----------\n        X : numpy array of shape (n_samples, )\n            The real input samples subsequence.\n        Y : numpy array of shape (n_samples, )\n            The estimated input samples subsequence.\n        Index : int\n        the index of the starting point in the subsequence\n        Returns\n        -------\n        score : float\n            dissimiarity score between the two subsquence\n        \"\"\"\n        X_train = self.X_train\n        X_train = self.detector.X_train_\n        power = self.power\n        \n        window = self.window\n        neighborhood = self.neighborhood\n        norm = self.norm\n        data = X_train\n        if norm == False:\n            if X.shape[0] == 0:\n                score = 0\n            else:\n                score = np.linalg.norm(X-Y, power)/(X.shape[0])\n            self.decision_scores_.append((index, score))\n            return score\n        elif type(X_train) == int:\n            print('Error! 
Detector is not fed to the object and X_train is not known')\n        elif neighborhood != 'all':\n            length = X.shape[0]\n            neighbor = int(self.neighborhood/2)\n\n            if index + neighbor < self.n_train_ and index - neighbor > 0: \n                region = np.concatenate((data[index - neighbor: index], data[index + window: index + neighbor] ))\n                D = np.max(region) - np.min(region)\n            elif index + neighbor >= self.n_train_ and index + window < self.n_train_:\n                region = np.concatenate((data[self.n_train_ - neighborhood: index], data[index + window: self.n_train_] ))\n                D =  np.max(region) - np.min(region)   \n            elif index + window >= self.n_train_:\n                region = data[self.n_train_ - neighborhood: index]\n                D = np.max(region) - np.min(region) \n            else:\n                region = np.concatenate((data[0: index], data[index + window: index + neighborhood] ))\n                D = np.max(region) - np.min(region) \n            \n            score = np.linalg.norm(X-Y, power)/D/(X.shape[0]**power)\n            self.decision_scores_.append((index, score))\n            return score\n    def set_param(self):\n        if self.detector != None:\n            self.window = self.detector.window\n            self.neighborhood = self.detector.neighborhood\n            self.n_train_ = self.detector.n_train_\n            self.X_train = self.detector.X_train_\n        else:\n            print('Error! 
Detector is not fed to the object and X_train is not known')\n        return self\n                \n\nclass Mahalanobis:\n    \"\"\" The function class for Mahalanobis measure\n    ----------\n    Probability : boolean, optional (default=False)\n        Whether to derive the anomoly score by the probability that such point occurs\n    neighborhood : int, optional (default=max (100, 10*window size))\n        The length of neighborhood to derivete the normalizing constant D which is based on\n        the difference of maximum and minimum in the neighborhood minus window. \n    Attributes\n    ----------\n    decision_scores_ : numpy array of shape (n_samples,)\n        The outlier scores of the training data.\n        The higher, the more abnormal. Outliers tend to have higher\n        scores. This value is available once the detector is\n        fitted.\n    detector: Object classifier\n        the anomaly detector that is used\n    \"\"\"\n    def __init__(self, probability = False):\n        self.probability = probability\n        self.detector = None\n        self.decision_scores_  = []\n        self.mu = 0\n        \n    def set_param(self):\n        '''update the parameters with the detector that is used \n        '''\n\n        self.n_initial_ = self.detector.n_initial_\n        self.estimation = self.detector.estimation\n        self.X_train = self.detector.X_train_\n        self.window = self.detector.window\n        window = self.window\n        resid = self.X_train - self.estimation\n        number = max(100, self.window)\n        self.residual = np.zeros((window, number))\n        for i in range(number):\n            self.residual[:, i] = resid[self.n_initial_+i:self.n_initial_+i+window]\n        self.mu = np.zeros(number)\n        self.cov = np.cov(self.residual, rowvar=1)\n        if self.window == 1:\n            self.cov = (np.sum(np.square(self.residual))/(number - 1))**0.5\n        return self\n    def norm_pdf_multivariate(self, x):\n        
'''multivarite normal density function\n        '''\n        try:\n            mu = self.mu\n        except:\n            mu = np.zeros(x.shape[0])\n        sigma = self.cov\n        size = x.shape[0]\n        if size == len(mu) and (size, size) == sigma.shape:\n            det = np.linalg.det(sigma)\n            if det == 0:\n                raise NameError(\"The covariance matrix can't be singular\")\n\n            norm_const = 1.0/ ( math.pow((2*math.pi),float(size)/2) * math.pow(det,1.0/2) )\n            x_mu = np.matrix(x - mu)\n            inv = np.linalg.inv(sigma)        \n            result = math.pow(math.e, -0.5 * (x_mu * inv * x_mu.T))\n            return norm_const * result\n        else:\n            raise NameError(\"The dimensions of the input don't match\")\n    def normpdf(self, x):\n        '''univariate normal\n        '''\n        mean = 0\n        sd = np.asscalar(self.cov)\n        var = float(sd)**2\n        denom = (2*math.pi*var)**.5\n        num = math.exp(-(float(x)-float(mean))**2/(2*var))\n        return num/denom \n\n    def measure(self, X, Y, index):\n        \"\"\"Derive the decision score based on the given distance measure \n        Parameters\n        ----------\n        X : numpy array of shape (n_samples, )\n            The real input samples subsequence.\n        Y : numpy array of shape (n_samples, )\n            The estimated input samples subsequence.\n        Index : int\n        the index of the starting point in the subsequence\n        Returns\n        -------\n        score : float\n            dissimiarity score between the two subsquence\n        \"\"\"\n        mu = np.zeros(self.detector.window)\n        cov = self.cov\n        if self.probability == False:\n\n            if X.shape[0] == mu.shape[0]:\n                score = np.matmul(np.matmul((X-Y-mu).T, cov), (X-Y-mu))/(X.shape[0])\n                self.decision_scores_.append((index, score))\n                return score\n            else:\n                
return (X-Y).T.dot(X-Y)\n\n        else:\n            if len(X) > 1:\n                prob = self.norm_pdf_multivariate(X-Y)\n            elif len(X) == 1: \n                X = np.asscalar(X)\n                Y = np.asscalar(Y)\n                prob = self.normpdf(X-Y)\n            else:\n                prob = 1\n            score = 1 - prob\n            score = max(score, 0)\n            self.decision_scores_.append((index, score))\n            return score\n\n\nclass Garch:\n    \"\"\" The function class for garch measure\n    ----------\n    p, q : int, optional (default=1, 1)\n        The order of the garch model to be fitted on the residual\n    mean : string, optional (default='zero' )\n        The forecast conditional mean. \n    vol: string, optional (default = 'garch')\n        he forecast conditional variance.\n    Attributes\n    ----------\n    decision_scores_ : numpy array of shape (n_samples,)\n        The outlier scores of the training data.\n        The higher, the more abnormal. Outliers tend to have higher\n        scores. 
This value is available once the detector is\n        fitted.\n    detector: Object classifier\n        the anomaly detector that is used\n    \"\"\"\n    def __init__(self, p = 1, q = 1, mean = 'zero', vol = 'garch'):\n        self.p = p\n        self.q = q\n        self.vol = vol\n        self.mean = mean\n        self.decision_scores_  = []\n        \n    def set_param(self):\n        '''update the parameters with the detector that is used \n        '''\n        q = self.q\n        p=self.p\n        mean = self.mean\n        vol = self.vol\n        if self.detector != None:\n            self.n_initial_ = self.detector.n_initial_\n            self.estimation = self.detector.estimation\n            self.X_train = self.detector.X_train_\n            self.window = self.detector.window\n            window = self.window\n            resid = 10 * (self.X_train - self.estimation)\n            model = arch_model(resid, mean=mean, vol=vol, p=p, q=q)\n            model_fit = model.fit(disp='off')\n            self.votility = model_fit.conditional_volatility/10\n        else:\n            print('Error! 
Detector not fed to the measure')\n        return self\n\n    def measure(self, X, Y, index):\n        \"\"\"Derive the decision score based on the given distance measure \n        Parameters\n        ----------\n        X : numpy array of shape (n_samples, )\n            The real input samples subsequence.\n        Y : numpy array of shape (n_samples, )\n            The estimated input samples subsequence.\n        Index : int\n        the index of the starting point in the subsequence\n        Returns\n        -------\n        score : float\n            dissimiarity score between the two subsquences\n        \"\"\"\n        X = np.array(X)\n        Y = np.array(Y)\n        length = len(X)\n        score = 0\n        if length != 0:\n            for i in range(length):\n                sigma = self.votility[index + i]\n                if sigma != 0:\n                    score += abs(X[i]-Y[i])/sigma\n                    \n            score = score/length       \n        return score\n\n\nclass SSA_DISTANCE:\n    \"\"\" The function class for SSA measure\n    good for contextual anomolies\n    ----------\n    method : string, optional (default='linear' )\n        The method to fit the line and derives the SSA score\n    e: float, optional (default = 1)\n        The upper bound to start new line search for linear method\n    Attributes\n    ----------\n    decision_scores_ : numpy array of shape (n_samples,)\n        The outlier scores of the training data.\n        The higher, the more abnormal. Outliers tend to have higher\n        scores. 
This value is available once the detector is\n        fitted.\n    detector: Object classifier\n        the anomaly detector that is used\n    \"\"\"\n    def __init__(self, method ='linear', e = 1):\n        self.method = method\n        self.decision_scores_  = []\n        self.e = e\n    def Linearization(self, X2):\n        \"\"\"Obtain the linearized curve.\n        Parameters\n        ----------\n        X2 : numpy array of shape (n, )\n            the time series curve to be fitted\n        e: float, integer, or numpy array \n        weights to obtain the \n        Returns\n        -------\n        fit: parameters for the fitted linear curve\n        \"\"\"\n        e = self.e\n        i = 0\n        fit = {}\n        fit['index'] = []\n        fit['rep'] = []\n        while i < len(X2):\n            fit['index'].append(i)\n            try:\n                fit['Y'+str(i)]= X2[i]\n            except:\n                print(X2.shape, X2)\n            fit['rep'].append(np.array([i, X2[i]]))\n            if i+1 >= len(X2):\n                    break\n            k = X2[i+1]-X2[i]\n            b = -i*(X2[i+1]-X2[i])+X2[i]\n            fit['reg' +str(i)]= np.array([k, b])\n            i += 2\n            if i >= len(X2):\n                break\n            d = np.abs(X2[i]- (k * i+b))\n            while d < e:\n                i +=1 \n                if i >= len(X2):\n                    break\n                d = np.abs(X2[i]- (k * i+b)) \n        return fit   \n    def set_param(self):\n        '''update the parameters with the detector that is used. \n        Since the SSA measure doens't need the attributes of detector\n        or characteristics of X_train, the process is omitted. 
\n        '''\n\n        return self\n\n    def measure(self, X2, X3, start_index):\n        \"\"\"Obtain the SSA similarity score.\n        Parameters\n        ----------\n        X2 : numpy array of shape (n, )\n            the reference timeseries\n        X3 : numpy array of shape (n, )\n            the tested timeseries\n        e: float, integer, or numpy array \n        weights to obtain the \n        Returns\n        -------\n        score: float, the higher the more dissimilar are the two curves \n        \"\"\"       \n        #linearization of data X2 and X3\n        X2 = np.array(X2)\n        X3 = np.array(X3)\n        e = self.e\n        fit = self.Linearization(X2)\n        fit2 = self.Linearization(X3)\n    \n        #line alinement \n        Index = []\n        test_list = fit['index'] + fit2['index']\n        [Index.append(x) for x in test_list if x not in Index]\n        Y = 0\n    \n        #Similarity Computation\n        for i in Index:\n            if i in fit['index'] and i in fit2['index']:\n                Y += abs(fit['Y'+str(i)]-fit2['Y'+str(i)])\n\n            elif i in fit['index']:\n                J = np.max(np.where(np.array(fit2['index']) < i ))\n                index = fit2['index'][J]\n                k = fit2['reg'+str(index)][0]\n                b = fit2['reg'+str(index)][1]\n                value = abs(k * i + b - fit['Y'+str(i)])\n                Y += value\n            elif i in fit2['index']:\n                J = np.max(np.where(np.array(fit['index']) < i ))\n                index = fit['index'][J]\n                k = fit['reg'+str(index)][0]\n                b = fit['reg'+str(index)][1]\n                value = abs(k * i + b - fit2['Y'+str(i)])\n                Y += value\n        if len(Index) != 0: \n            score = Y/len(Index)\n        else:\n            score = 0\n        self.decision_scores_.append((start_index, score))\n        if len(X2) == 1:\n            print('Error! 
SSA measure doesn\\'t apply to singleton' )\n        else:\n            return score  \n\n\nclass Fourier:\n    \"\"\" The function class for Fourier measure\n    good for contextual anomolies\n    ----------\n    power: int, optional (default = 2)\n        Lp norm for dissimiarlity measure considered\n    Attributes\n    ----------\n    decision_scores_ : numpy array of shape (n_samples,)\n        The outlier scores of the training data.\n        The higher, the more abnormal. Outliers tend to have higher\n        scores. This value is available once the detector is\n        fitted.\n    detector: Object classifier\n        the anomaly detector that is used\n    \"\"\"\n    def __init__(self, power = 2):\n        self.decision_scores_  = []\n        self.power = power\n    def set_param(self):\n        '''update the parameters with the detector that is used \n        since the FFT measure doens't need the attributes of detector\n        or characteristics of X_train, the process is omitted. 
\n        '''\n\n        return self\n\n    def measure(self, X2, X3, start_index):\n        \"\"\"Obtain the SSA similarity score.\n        Parameters\n        ----------\n        X2 : numpy array of shape (n, )\n            the reference timeseries\n        X3 : numpy array of shape (n, )\n            the tested timeseries\n        index: int, \n        current index for the subseqeuence that is being measured \n        Returns\n        -------\n        score: float, the higher the more dissimilar are the two curves \n        \"\"\"       \n \n        power = self.power\n        X2 = np.array(X2)\n        X3 = np.array(X3)\n        if len(X2) == 0:\n            score = 0\n        else:\n            X2 = np.fft.fft(X2);\n            X3 = np.fft.fft(X3)\n            score = np.linalg.norm(X2 - X3, ord = power)/len(X3)\n        self.decision_scores_.append((start_index, score))\n        return score\n\n\nclass DTW:\n    \"\"\" The function class for dynamic time warping measure\n\n    ----------\n    method : string, optional (default='L2' )\n        The distance measure to derive DTW.\n        Avaliable \"L2\", \"L1\", and custom\n    Attributes\n    ----------\n    decision_scores_ : numpy array of shape (n_samples,)\n        The outlier scores of the training data.\n        The higher, the more abnormal. Outliers tend to have higher\n        scores. 
This value is available once the detector is\n        fitted.\n    detector: Object classifier\n        the anomaly detector that is used\n    \"\"\"\n    def __init__(self, method = 'L2'):\n        self.decision_scores_  = []\n        if type(method) == str:\n            if method == 'L1':\n                distance = lambda x, y: abs(x-y)\n            elif method == 'L2':\n                distance = lambda x, y: (x-y)**2\n        else:\n            distance = method\n        self.distance = distance\n    def set_param(self):\n        '''update the parameters with the detector that is used \n        since the FFT measure doens't need the attributes of detector\n        or characteristics of X_train, the process is omitted. \n        '''\n\n        return self\n\n    def measure(self, X1, X2, start_index):\n        \"\"\"Obtain the SSA similarity score.\n        Parameters\n        ----------\n        X1 : numpy array of shape (n, )\n            the reference timeseries\n        X2 : numpy array of shape (n, )\n            the tested timeseries\n        index: int, \n        current index for the subseqeuence that is being measured \n        Returns\n        -------\n        score: float, the higher the more dissimilar are the two curves \n        \"\"\"       \n        distance = self.distance\n        X1 = np.array(X1)\n        X2 = np.array(X2)\n        \n        value = 1\n        if len(X1)==0:\n            value =0\n            X1= np.zeros(5)\n            X2 = X1\n        M = np.zeros((len(X1), len(X2)))\n        for index_i in range(len(X1)):\n            for index_j in range(len(X1) - index_i):\n                L = []\n                i = index_i\n                j = index_i + index_j\n                D = distance(X1[i], X2[j])\n                try:\n                    L.append(M[i-1, j-1])\n                except:\n                    L.append(np.inf)\n                try:\n                    L.append(M[i, j-1])\n                except:\n                 
   L.append(np.inf)\n                try:\n                    L.append(M[i-1, j])\n                except:\n                    L.append(np.inf)\n                D += min(L)\n                M[i,j] = D\n                if i !=j:\n                    L = []\n                    j = index_i\n                    i = index_i + index_j\n                    D = distance(X1[i], X2[j])\n                    try:\n                        L.append(M[i-1, j-1])\n                    except:\n                        L.append(np.inf)\n                    try:\n                        L.append(M[i, j-1])\n                    except:\n                        L.append(np.inf)\n                    try:\n                        L.append(M[i-1, j])\n                    except:\n                        L.append(np.inf)\n                    D += min(L)\n                    M[i,j] = D\n        \n        score = M[len(X1)-1, len(X1)-1]/len(X1)\n        if value == 0:\n            score = 0\n        self.decision_scores_.append((start_index, score))\n        return score\n\n\nclass EDRS:\n    \"\"\" The function class for edit distance on real sequences \n\n    ----------\n    method : string, optional (default='L2' )\n        The distance measure to derive DTW.\n        Avaliable \"L2\", \"L1\", and custom\n    ep: float, optiona (default = 0.1)\n        the threshold value to decide Di_j\n    vot : boolean, optional (default = False)\n        whether to adapt a chaging votilities estimaed by garch\n        for ep at different windows. \n    Attributes\n    ----------\n    decision_scores_ : numpy array of shape (n_samples,)\n        The outlier scores of the training data.\n        The higher, the more abnormal. Outliers tend to have higher\n        scores. 
This value is available once the detector is\n        fitted.\n    detector: Object classifier\n        the anomaly detector that is used\n    \"\"\"\n    def __init__(self, method = 'L1', ep = False, vol = False):\n        self.decision_scores_  = []\n        if type(method) == str:\n            if method == 'L1':\n                distance = lambda x, y: abs(x-y)\n        else:\n            distance = method\n        self.distance = distance\n        self.ep = ep\n        self.vot = vol\n    def set_param(self):\n        '''update the ep based on the votalitiy of the model \n        '''\n        estimation = np.array(self.detector.estimation )\n        initial = self.detector.n_initial_\n        X = np.array(self.detector.X_train_)\n        self.initial = initial\n        residual = estimation[initial:] - X[initial:]\n        number = len(residual)\n        #var = (np.sum(np.square(residual))/(number - 1))**0.5\n        vot = self.vot\n        if vot == False:\n            var = np.var(residual)\n        else:\n            model = arch_model(10 * residual, mean='Constant', vol='garch', p=1, q=1)\n            model_fit = model.fit(disp='off')\n            var = model_fit.conditional_volatility/10\n            \n        if self.ep == False:\n            self.ep =  3 * (np.sum(np.square(residual))/(len(residual) - 1))**0.5\n        else: \n            self.ep = self.ep\n        \n        \n        return self\n\n    def measure(self, X1, X2, start_index):\n        \"\"\"Obtain the SSA similarity score.\n        Parameters\n        ----------\n        X1 : numpy array of shape (n, )\n            the reference timeseries\n        X2 : numpy array of shape (n, )\n            the tested timeseries\n        index: int, \n        current index for the subseqeuence that is being measured \n        Returns\n        -------\n        score: float, the higher the more dissimilar are the two curves \n        \"\"\"       \n        distance = self.distance\n        X1 = 
np.array(X1)\n        X2 = np.array(X2)\n        vot = self.vot\n\n        if vot == False:\n            ep = self.ep\n        else:\n            try:\n                ep = self.ep[start_index - self.initial]\n            except:\n                #sometime start_index is the length of the number \n                ep = 0\n        value = 1\n        if len(X1)==0:\n            value =0\n            X1= np.zeros(5)\n            X2 = X1\n        M = np.zeros((len(X1), len(X2)))\n        M[:, 0] = np.arange(len(X1))\n        M[0, :] = np.arange(len(X1))\n        for index_i in range(1, len(X1)):\n            for index_j in range(len(X1) - index_i):\n\n                L = []\n                i = index_i\n                j = index_i + index_j\n                D = distance(X1[i], X2[j])\n                if D < ep:\n                    M[i, j]= M[i-1, j-1]\n                else:\n                    try:\n                        L.append(M[i-1, j-1])\n                    except:\n                        L.append(np.inf)\n                    try:\n                        L.append(M[i, j-1])\n                    except:\n                        L.append(np.inf)\n                    try:\n                        L.append(M[i-1, j])\n                    except:\n                        L.append(np.inf)\n                    M[i,j] = 1 + min(L)\n                if i !=j:\n                    L = []\n                    j = index_i\n                    i = index_i + index_j\n                    D = distance(X1[i], X2[j])\n                    if D < ep:\n                        M[i, j]= M[i-1, j-1]\n                    else: \n                        try:\n                            L.append(M[i-1, j-1])\n                        except:\n                            L.append(np.inf)\n                        try:\n                            L.append(M[i, j-1])\n                        except:\n                            L.append(np.inf)\n                        try:\n              
              L.append(M[i-1, j])\n                        except:\n                            L.append(np.inf)\n                        M[i,j] = 1 + min(L)\n\n        score = M[len(X1)-1, len(X1)-1]/len(X1)\n        if value == 0:\n            score = 0\n        self.decision_scores_.append((start_index, score))\n        return score\n\nclass TWED:\n    \"\"\" Function class for Time-warped edit distance(TWED) measure\n\n    ----------\n    method : string, optional (default='L2' )\n        The distance measure to derive DTW.\n        Avaliable \"L2\", \"L1\", and custom\n    gamma: float, optiona (default = 0.1)\n        mismatch penalty\n    v : float, optional (default = False)\n        stifness parameter\n    Attributes\n    ----------\n    decision_scores_ : numpy array of shape (n_samples,)\n        The outlier scores of the training data.\n        The higher, the more abnormal. Outliers tend to have higher\n        scores. This value is available once the detector is\n        fitted.\n    detector: Object classifier\n        the anomaly detector that is used\n    \"\"\"\n    def __init__(self, gamma = 0.1, v = 0.1):\n        self.decision_scores_  = []\n\n        self.gamma = gamma\n        self.v = v\n    def set_param(self):\n        '''No need'''     \n        return self\n    \n    def measure(self, A, B, start_index):\n        \"\"\"Obtain the SSA similarity score.\n        Parameters\n        ----------\n        X1 : numpy array of shape (n, )\n            the reference timeseries\n        X2 : numpy array of shape (n, )\n            the tested timeseries\n        index: int, \n        current index for the subseqeuence that is being measured \n        Returns\n        -------\n        score: float, the higher the more dissimilar are the two curves \n        \"\"\"    \n        #code modifed from wikipedia\n        Dlp = lambda x,y: abs(x-y)\n        timeSB = np.arange(1,len(B)+1)\n        timeSA = np.arange(1,len(A)+1)\n        nu = self.v\n        
_lambda = self.gamma\n        # Reference :\n        #    Marteau, P.; F. (2009). \"Time Warp Edit Distance with Stiffness Adjustment for Time Series Matching\".\n        #    IEEE Transactions on Pattern Analysis and Machine Intelligence. 31 (2): 306–318. arXiv:cs/0703033\n        #    http://people.irisa.fr/Pierre-Francois.Marteau/\n\n        # Check if input arguments\n        if len(A) != len(timeSA):\n            print(\"The length of A is not equal length of timeSA\")\n            return None, None\n    \n        if len(B) != len(timeSB):\n            print(\"The length of B is not equal length of timeSB\")\n            return None, None\n\n        if nu < 0:\n            print(\"nu is negative\")\n            return None, None\n\n        # Add padding\n        A = np.array([0] + list(A))\n        timeSA = np.array([0] + list(timeSA))\n        B = np.array([0] + list(B))\n        timeSB = np.array([0] + list(timeSB))\n\n        n = len(A)\n        m = len(B)\n        # Dynamical programming\n        DP = np.zeros((n, m))\n\n        # Initialize DP Matrix and set first row and column to infinity\n        DP[0, :] = np.inf\n        DP[:, 0] = np.inf\n        DP[0, 0] = 0\n\n        # Compute minimal cost\n        for i in range(1, n):\n            for j in range(1, m):\n                # Calculate and save cost of various operations\n                C = np.ones((3, 1)) * np.inf\n                # Deletion in A\n                C[0] = (\n                    DP[i - 1, j]\n                    + Dlp(A[i - 1], A[i])\n                    + nu * (timeSA[i] - timeSA[i - 1])\n                    + _lambda\n                )\n                # Deletion in B\n                C[1] = (\n                    DP[i, j - 1]\n                    + Dlp(B[j - 1], B[j])\n                    + nu * (timeSB[j] - timeSB[j - 1])\n                    + _lambda\n                )\n                # Keep data points in both time series\n                C[2] = (\n                    DP[i - 
1, j - 1]\n                    + Dlp(A[i], B[j])\n                    + Dlp(A[i - 1], B[j - 1])\n                    + nu * (abs(timeSA[i] - timeSB[j]) + abs(timeSA[i - 1] - timeSB[j - 1]))\n                )\n                # Choose the operation with the minimal cost and update DP Matrix\n                DP[i, j] = np.min(C)\n        distance = DP[n - 1, m - 1]\n        self.M = DP\n        self.decision_scores_.append((start_index, distance))\n        return distance"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/metrics/vus/models/feature.py",
    "content": "# -*- coding: utf-8 -*-\n\"\"\"Classes of feature mapping for model type B\n\"\"\"\n\nimport numpy as np\n# import matplotlib.pyplot as plt\n# import random\n# from arch import arch_model\nimport pandas as pd\nimport math\n# import pmdarima as pm\n# from pmdarima import model_selection\n# import os\n# import dis\n# import statistics\n# from sklearn import metrics\n# import sklearn\nfrom tsfresh import extract_features\n\nfrom statsmodels.tsa.seasonal import seasonal_decompose\n\n# import itertools\n# import functools\nimport warnings\nfrom builtins import range\n# from collections import defaultdict\n\n\nfrom numpy.linalg import LinAlgError\n# from scipy.signal import cwt, find_peaks_cwt, ricker, welch\n# from scipy.stats import linregress\n# from statsmodels.tools.sm_exceptions import MissingDataError\n\nwith warnings.catch_warnings():\n    # Ignore warnings of the patsy package\n    warnings.simplefilter(\"ignore\", DeprecationWarning)\n\n    from statsmodels.tsa.ar_model import AR\n# from statsmodels.tsa.stattools import acf, adfuller, pacf\n\nfrom hurst import compute_Hc\n\nclass Window:\n    \"\"\" The  class for rolling window feature mapping.\n    The mapping converts the original timeseries X into a matrix. \n    The matrix consists of rows of sliding windows of original X. \n    \"\"\"\n\n    def __init__(self,  window = 100):\n        self.window = window\n        self.detector = None\n    def convert(self, X):\n        n = self.window\n        X = pd.Series(X)\n        L = []\n        if n == 0:\n            df = X\n        else:\n            for i in range(n):\n                L.append(X.shift(i))\n            df = pd.concat(L, axis = 1)\n            df = df.iloc[n-1:]\n        return df\n\nclass tf_Stat:\n    '''statisitc feature extraction using the tf_feature package. \n    It calculates 763 features in total so it might be over complicated for some models. 
\n    Recommend to use for methods like Isolation Forest which randomly picks a feature\n    and then perform the classification. To use for other distance-based model like KNN,\n    LOF, CBLOF, etc, first train to pass a function that give weights to individual features so that\n    inconsequential features won't cloud the important ones (mean, variance, kurtosis, etc).\n\n    '''\n    def __init__(self,  window = 100, step = 25):\n        self.window = window\n        self.step = step\n        self.detector = None\n    def convert(self, X):\n        window = self.window\n        step = self.step\n        pos = math.ceil(window/2)\n        #step <= window\n\n        length = X.shape[0]\n\n        Xd = pd.DataFrame(X)\n        Xd.columns = pd.Index(['x'], dtype='object')\n        Xd['id'] = 1\n        Xd['time'] = Xd.index\n        \n        test = np.array(extract_features(Xd.iloc[0+pos-math.ceil(window/2):0+pos + math.floor(window/2)], column_id=\"id\", column_sort=\"time\", column_kind=None, column_value=None).fillna(0))\n        M = np.zeros((length - window, test.shape[1]+1 ))\n\n        \n        i = 0\n        while i + window <= M.shape[0]:\n            M[i:i+step, 0]= X[pos + i: pos + i + step]\n            vector = np.array(extract_features(Xd.iloc[i+pos-math.ceil(window/2):i+pos + math.floor(window/2)], column_id=\"id\", column_sort=\"time\", column_kind=None, column_value=None).fillna(0))\n\n            M[i:i+step, 1:] = vector\n            i+= step\n        num = M.shape[0]\n        if i <  num:\n            M[i: num, 0]= X[pos + i: pos + num]\n            M[i: num, 1:] = np.array(extract_features(Xd.iloc[i+pos-math.ceil(window/2):], column_id=\"id\", column_sort=\"time\", column_kind=None, column_value=None).fillna(0))\n        return M\n\nclass Stat:\n    '''statisitc feature extraction. 
\n    Features include [mean, variance, skewness, kurtosis, autocorrelation, maximum, \n    minimum, entropy, seasonality, hurst component, AR coef]\n\n    '''\n    def __init__(self,  window = 100, data_step = 10, param = [{\"coeff\": 0, \"k\": 5}], lag = 1, freq = 720):\n        self.window = window\n        self.data_step = data_step\n        self.detector = None\n        self.param = param\n        self.lag = lag \n        self.freq =freq\n        if data_step > int(window/2):\n            raise ValueError('value step shoudm\\'t be greater than half of the window')\n        \n        \n    def convert(self, X):\n        freq = self.freq\n        n = self.window\n        data_step = self.data_step\n        X = pd.Series(X)\n        L = []\n        if n == 0:\n            df = X\n            raise ValueError('window lenght is set to zero')\n        else:\n            for i in range(n):\n                L.append(X.shift(i))\n            df = pd.concat(L, axis = 1)\n            df = df.iloc[n:]\n            df2 = pd.concat(L[:data_step], axis = 1)\n\n        \n        \n        df = df.reset_index()\n        #value \n        x0 = df2[math.ceil(n/2) : - math.floor(n/2)].reset_index()\n        #mean \n        x1 = (df.mean(axis=1))\n        #variance \n        x2 = df.var(axis=1)\n        #AR-coef\n        self.ar_function = lambda x: self.ar_coefficient(x)\n        x3 = df.apply(self.ar_function, axis =1, result_type='expand'  )\n        #autocorrelation\n        self.auto_function = lambda x: self.autocorrelation(x)\n        x4 = df.apply(self.auto_function, axis =1, result_type='expand'  )\n        #kurtosis\n        x5 = (df.kurtosis(axis=1))\n        #skewness\n        x6 = (df.skew(axis=1))\n        #maximum\n        x7 = (df.max(axis=1))\n        #minimum\n        x8 = (df.min(axis=1))\n        #entropy\n        self.entropy_function = lambda x: self.sample_entropy(x)\n        x9 = df.apply(self.entropy_function, axis =1, result_type='expand')\n        \n      
  #seasonality\n        result = seasonal_decompose(X, model='additive', freq = freq, extrapolate_trend='freq')\n        #seasonal\n        x10 = pd.Series(np.array(result.seasonal[math.ceil(n/2) : - math.floor(n/2)]))\n        #trend \n        x11 = pd.Series(np.array(result.trend[math.ceil(n/2) : - math.floor(n/2)]))\n        #resid \n        x12 = pd.Series(np.array(result.resid[math.ceil(n/2) : - math.floor(n/2)]))\n        \n        #Hurst component\n        self.hurst_function = lambda x: self.hurst_f(x)\n        x13 = df.apply(self.hurst_function, axis =1, result_type='expand')\n        \n        L = [x0, x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11,x12, x13]\n        M = pd.concat(L, axis = 1)\n        M = M.drop(columns=['index'])\n\n        return M\n    def ar_coefficient(self, x):\n        \"\"\"\n        This feature calculator fits the unconditional maximum likelihood\n        of an autoregressive AR(k) process.\n        The k parameter is the maximum lag of the process\n\n        .. math::\n\n            X_{t}=\\\\varphi_0 +\\\\sum _{{i=1}}^{k}\\\\varphi_{i}X_{{t-i}}+\\\\varepsilon_{t}\n\n        For the configurations from param which should contain the maxlag \"k\" and such an AR process is calculated. 
Then\n        the coefficients :math:`\\\\varphi_{i}` whose index :math:`i` contained from \"coeff\" are returned.\n\n        :param x: the time series to calculate the feature of\n        :type x: numpy.ndarray\n        :param param: contains dictionaries {\"coeff\": x, \"k\": y} with x,y int\n        :type param: list\n        :return x: the different feature values\n        :return type: pandas.Series\n        \"\"\"\n        calculated_ar_params = {}\n        param = self.param\n        x_as_list = list(x)\n\n        res = {}\n\n        for parameter_combination in param:\n            k = parameter_combination[\"k\"]\n            p = parameter_combination[\"coeff\"]\n\n            column_name = \"coeff_{}__k_{}\".format(p, k)\n\n            if k not in calculated_ar_params:\n                try:\n                    calculated_AR = AR(x_as_list)\n                    calculated_ar_params[k] = calculated_AR.fit(maxlag=k, solver=\"mle\").params\n                except (LinAlgError, ValueError):\n                    calculated_ar_params[k] = [np.NaN] * k\n\n            mod = calculated_ar_params[k]\n\n            if p <= k:\n                try:\n                    res[column_name] = mod[p]\n                except IndexError:\n                    res[column_name] = 0\n            else:\n                res[column_name] = np.NaN\n\n        L = [(key, value) for key, value in res.items()]\n        L0 = []\n        for item in L:\n            L0.append(item[1])\n        return L0\n\n    def autocorrelation(self, x):\n        \"\"\"\n        Calculates the autocorrelation of the specified lag, according to the formula [1]\n\n        .. math::\n\n            \\\\frac{1}{(n-l)\\\\sigma^{2}} \\\\sum_{t=1}^{n-l}(X_{t}-\\\\mu )(X_{t+l}-\\\\mu)\n\n        where :math:`n` is the length of the time series :math:`X_i`, :math:`\\\\sigma^2` its variance and :math:`\\\\mu` its\n        mean. `l` denotes the lag.\n\n        .. 
rubric:: References\n\n        [1] https://en.wikipedia.org/wiki/Autocorrelation#Estimation\n\n        :param x: the time series to calculate the feature of\n        :type x: numpy.ndarray\n        :param lag: the lag\n        :type lag: int\n        :return: the value of this feature\n        :return type: float\n        \"\"\"\n        lag = self.lag\n        # This is important: If a series is passed, the product below is calculated\n        # based on the index, which corresponds to squaring the series.\n        if isinstance(x, pd.Series):\n            x = x.values\n        if len(x) < lag:\n            return np.nan\n        # Slice the relevant subseries based on the lag\n        y1 = x[:(len(x) - lag)]\n        y2 = x[lag:]\n        # Subtract the mean of the whole series x\n        x_mean = np.mean(x)\n        # The result is sometimes referred to as \"covariation\"\n        sum_product = np.sum((y1 - x_mean) * (y2 - x_mean))\n        # Return the normalized unbiased covariance\n        v = np.var(x)\n        if np.isclose(v, 0):\n            return np.NaN\n        else:\n            return sum_product / ((len(x) - lag) * v)\n    def _into_subchunks(self, x, subchunk_length, every_n=1):\n        \"\"\"\n        Split the time series x into subwindows of length \"subchunk_length\", starting every \"every_n\".\n\n        For example, the input data if [0, 1, 2, 3, 4, 5, 6] will be turned into a matrix\n\n            0  2  4\n            1  3  5\n            2  4  6\n\n        with the settings subchunk_length = 3 and every_n = 2\n        \"\"\"\n        len_x = len(x)\n\n        assert subchunk_length > 1\n        assert every_n > 0\n\n        # how often can we shift a window of size subchunk_length over the input?\n        num_shifts = (len_x - subchunk_length) // every_n + 1\n        shift_starts = every_n * np.arange(num_shifts)\n        indices = np.arange(subchunk_length)\n\n        indexer = np.expand_dims(indices, axis=0) + 
np.expand_dims(shift_starts, axis=1)\n        return np.asarray(x)[indexer]\n    def sample_entropy(self, x):\n        \"\"\"\n        Calculate and return sample entropy of x.\n\n        .. rubric:: References\n\n        |  [1] http://en.wikipedia.org/wiki/Sample_Entropy\n        |  [2] https://www.ncbi.nlm.nih.gov/pubmed/10843903?dopt=Abstract\n\n        :param x: the time series to calculate the feature of\n        :type x: numpy.ndarray\n\n        :return: the value of this feature\n        :return type: float\n        \"\"\"\n        x = np.array(x)\n\n        # if one of the values is NaN, we can not compute anything meaningful\n        if np.isnan(x).any():\n            return np.nan\n\n        m = 2  # common value for m, according to wikipedia...\n        tolerance = 0.2 * np.std(x)  # 0.2 is a common value for r, according to wikipedia...\n\n        # Split time series and save all templates of length m\n        # Basically we turn [1, 2, 3, 4] into [1, 2], [2, 3], [3, 4]\n        xm = self._into_subchunks(x, m)\n\n        # Now calculate the maximum distance between each of those pairs\n        #   np.abs(xmi - xm).max(axis=1)\n        # and check how many are below the tolerance.\n        # For speed reasons, we are not doing this in a nested for loop,\n        # but with numpy magic.\n        # Example:\n        # if x = [1, 2, 3]\n        # then xm = [[1, 2], [2, 3]]\n        # so we will substract xm from [1, 2] => [[0, 0], [-1, -1]]\n        # and from [2, 3] => [[1, 1], [0, 0]]\n        # taking the abs and max gives us:\n        # [0, 1] and [1, 0]\n        # as the diagonal elements are always 0, we substract 1.\n        B = np.sum([np.sum(np.abs(xmi - xm).max(axis=1) <= tolerance) - 1 for xmi in xm])\n\n        # Similar for computing A\n        xmp1 = self._into_subchunks(x, m + 1)\n\n        A = np.sum([np.sum(np.abs(xmi - xmp1).max(axis=1) <= tolerance) - 1 for xmi in xmp1])\n\n        # Return SampEn\n        return -np.log(A / B)\n    def 
hurst_f(self, x):\n        H,c, M = compute_Hc(x)\n        return [H, c]"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/metrics/vus/utils/metrics.py",
    "content": "from sklearn import metrics\nimport numpy as np\nimport math\n# import matplotlib.pyplot as plt\n\nclass metricor:\n    def __init__(self, a = 1, probability = True, bias = 'flat', ):\n        self.a = a\n        self.probability = probability\n        self.bias = bias \n    \n    def detect_model(self, model, label, contamination = 0.1, window = 100, is_A = False, is_threshold = True):\n        if is_threshold:\n            score = self.scale_threshold(model.decision_scores_, model._mu, model._sigma)\n        else:\n            score = self.scale_contamination(model.decision_scores_, contamination = contamination)\n        if is_A is False:\n            scoreX = np.zeros(len(score)+window)\n            scoreX[math.ceil(window/2): len(score)+window - math.floor(window/2)] = score \n        else:\n            scoreX = score\n            \n        self.score_=scoreX\n        L = self.metric(label, scoreX)\n        return L\n\n        \n    def labels_conv(self, preds):\n        '''return indices of predicted anomaly\n        '''\n\n        # p = np.zeros(len(preds))\n        index = np.where(preds >= 0.5)\n        return index[0]\n    \n    def labels_conv_binary(self, preds):\n        '''return predicted label\n        '''\n        p = np.zeros(len(preds))\n        index = np.where(preds >= 0.5)\n        p[index[0]] = 1\n        return p \n\n\n    def w(self, AnomalyRange, p):\n        MyValue = 0\n        MaxValue = 0\n        start = AnomalyRange[0]\n        AnomalyLength = AnomalyRange[1] - AnomalyRange[0] + 1\n        for i in range(start, start +AnomalyLength):\n            bi = self.b(i, AnomalyLength)\n            MaxValue +=  bi\n            if i in p:\n                MyValue += bi\n        return MyValue/MaxValue\n\n    def Cardinality_factor(self, Anomolyrange, Prange):\n        score = 0 \n        start = Anomolyrange[0]\n        end = Anomolyrange[1]\n        for i in Prange:\n            if i[0] >= start and i[0] <= end:\n               
 score +=1 \n            elif start >= i[0] and start <= i[1]:\n                score += 1\n            elif end >= i[0] and end <= i[1]:\n                score += 1\n            elif start >= i[0] and end <= i[1]:\n                score += 1\n        if score == 0:\n            return 0\n        else:\n            return 1/score\n        \n    def b(self, i, length):\n        bias = self.bias \n        if bias == 'flat':\n            return 1\n        elif bias == 'front-end bias':\n            return length - i + 1\n        elif bias == 'back-end bias':\n            return i\n        else:\n            if i <= length/2:\n                return i\n            else:\n                return length - i + 1\n\n\n    def scale_threshold(self, score, score_mu, score_sigma):\n        return (score >= (score_mu + 3*score_sigma)).astype(int)\n    \n    \n    def metric_new(self, label, score, plot_ROC=False, alpha=0.2,coeff=3):\n        '''input:\n               Real labels and anomaly score in prediction\n            \n           output:\n               AUC, \n               Precision, \n               Recall, \n               F-score, \n               Range-precision, \n               Range-recall, \n               Range-Fscore, \n               Precison@k, \n             \n            k is chosen to be # of outliers in real labels\n        '''\n        if np.sum(label) == 0:\n            print('All labels are 0. 
Label must have groud truth value for calculating AUC score.')\n            return None\n        \n        if np.isnan(score).any() or score is None:\n            print('Score must not be none.')\n            return None\n        \n        #area under curve\n        auc = metrics.roc_auc_score(label, score)\n        # plor ROC curve\n        if plot_ROC:\n            fpr, tpr, thresholds  = metrics.roc_curve(label, score)\n            # display = metrics.RocCurveDisplay(fpr=fpr, tpr=tpr, roc_auc=auc)\n            # display.plot()            \n            \n        #precision, recall, F\n        \n        preds = score > (np.mean(score)+coeff*np.std(score))\n        if np.sum(preds) == 0:\n            preds = score > (np.mean(score)+2*np.std(score))\n            if np.sum(preds) == 0:\n                preds = score > (np.mean(score)+1*np.std(score))\n        Precision, Recall, F, Support = metrics.precision_recall_fscore_support(label, preds, zero_division=0)\n        precision = Precision[1]\n        recall = Recall[1]\n        f = F[1]\n\n        #range anomaly \n        Rrecall, ExistenceReward, OverlapReward = self.range_recall_new(label, preds, alpha)\n        Rprecision = self.range_recall_new(preds, label, 0)[0]\n        \n        if Rprecision + Rrecall==0:\n            Rf=0\n        else:\n            Rf = 2 * Rrecall * Rprecision / (Rprecision + Rrecall)\n        \n        # top-k\n        k = int(np.sum(label))\n        threshold = np.percentile(score, 100 * (1-k/len(label)))\n        \n        # precision_at_k = metrics.top_k_accuracy_score(label, score, k)\n        p_at_k = np.where(preds > threshold)[0]\n        TP_at_k = sum(label[p_at_k])\n        precision_at_k = TP_at_k/k\n        \n        L = [auc, precision, recall, f, Rrecall, ExistenceReward, OverlapReward, Rprecision, Rf, precision_at_k]\n        if plot_ROC:\n            return L, fpr, tpr\n        return L\n\n    def metric_PR(self, label, score):\n        precision, recall, thresholds = 
metrics.precision_recall_curve(label, score)\n        # plt.figure()\n        # disp = metrics.PrecisionRecallDisplay(precision=precision, recall=recall)\n        # disp.plot()\n        AP = metrics.auc(recall, precision)\n        #AP = metrics.average_precision_score(label, score)\n        return precision, recall, AP\n        \n    def range_recall_new(self, labels, preds, alpha):   \n\n\n        p = np.where(preds == 1)[0]    # positions of predicted label==1\n        range_pred = self.range_convers_new(preds)  \n        range_label = self.range_convers_new(labels)\n        \n        Nr = len(range_label)    # total # of real anomaly segments\n\n        ExistenceReward = self.existence_reward(range_label, p)\n\n\n        OverlapReward = 0\n        for i in range_label:\n            OverlapReward += self.w(i, p) * self.Cardinality_factor(i, range_pred)\n\n\n        score = alpha * ExistenceReward + (1-alpha) * OverlapReward\n        if Nr != 0:\n            return score/Nr, ExistenceReward/Nr, OverlapReward/Nr\n        else:\n            return 0,0,0\n\n    def range_convers_new(self, label):\n        '''\n        input: arrays of binary values \n        output: list of ordered pair [[a0,b0], [a1,b1]... 
] of the inputs\n        '''\n        L = []\n        i = 0\n        j = 0 \n        while j < len(label):\n            # print(i)\n            while label[i] == 0:\n                i+=1\n                if i >= len(label):\n                    break\n            j = i+1\n            # print('j'+str(j))\n            if j >= len(label):\n                if j==len(label):\n                    L.append((i,j-1))\n    \n                break\n            while label[j] != 0:\n                j+=1\n                if j >= len(label):\n                    L.append((i,j-1))\n                    break\n            if j >= len(label):\n                break\n            L.append((i, j-1))\n            i = j\n        return L\n        \n    def existence_reward(self, labels, preds):\n        '''\n        labels: list of ordered pair \n        preds predicted data\n        '''\n\n        score = 0\n        for i in labels:\n            if np.sum(np.multiply(preds <= i[1], preds >= i[0])) > 0:\n                score += 1\n        return score\n    \n    def num_nonzero_segments(self, x):\n        count=0\n        if x[0]>0:\n            count+=1\n        for i in range(1, len(x)):\n            if x[i]>0 and x[i-1]==0:\n                count+=1\n        return count\n    \n    def extend_postive_range(self, x, window=5):\n        label = x.copy().astype(float)\n        L = self.range_convers_new(label)   # index of non-zero segments\n        length = len(label)\n        for k in range(len(L)):\n            s = L[k][0] \n            e = L[k][1] \n            \n            \n            x1 = np.arange(e,min(e+window//2,length))\n            label[x1] += np.sqrt(1 - (x1-e)/(window))\n            \n            x2 = np.arange(max(s-window//2,0),s)\n            label[x2] += np.sqrt(1 - (s-x2)/(window))\n            \n        label = np.minimum(np.ones(length), label)\n        return label\n    \n    def extend_postive_range_individual(self, x, percentage=0.2):\n        label = 
x.copy().astype(float)\n        L = self.range_convers_new(label)   # index of non-zero segments\n        length = len(label)\n        for k in range(len(L)):\n            s = L[k][0] \n            e = L[k][1] \n            \n            l0 = int((e-s+1)*percentage)\n            \n            x1 = np.arange(e,min(e+l0,length))\n            label[x1] += np.sqrt(1 - (x1-e)/(2*l0))\n            \n            x2 = np.arange(max(s-l0,0),s)\n            label[x2] += np.sqrt(1 - (s-x2)/(2*l0))\n            \n        label = np.minimum(np.ones(length), label)\n        return label\n    \n    def TPR_FPR_RangeAUC(self, labels, pred, P, L):\n        product = labels * pred\n        \n        TP = np.sum(product)\n        \n        # recall = min(TP/P,1)\n        P_new = (P+np.sum(labels))/2      # so TPR is neither large nor small\n        # P_new = np.sum(labels)\n        recall = min(TP/P_new,1)\n        # recall = TP/np.sum(labels)\n        # print('recall '+str(recall))\n        \n        \n        existence = 0\n        for seg in L:\n            if np.sum(product[seg[0]:(seg[1]+1)])>0:\n                existence += 1\n\n        if len(L) == 0:\n            existence_ratio = existence\n        else:\n            existence_ratio = existence/len(L)\n        # print(existence_ratio)\n        \n        # TPR_RangeAUC = np.sqrt(recall*existence_ratio)\n        # print(existence_ratio)\n        TPR_RangeAUC = recall*existence_ratio\n        \n        FP = np.sum(pred) - TP\n        # TN = np.sum((1-pred) * (1-labels))\n        \n        # FPR_RangeAUC = FP/(FP+TN)\n        N_new = len(labels) - P_new\n        FPR_RangeAUC = FP/N_new\n        \n        Precision_RangeAUC = TP/np.sum(pred)\n        \n        return TPR_RangeAUC, FPR_RangeAUC, Precision_RangeAUC\n    \n    def RangeAUC(self, labels, score, window=0, percentage=0, plot_ROC=False, AUC_type='window'):\n        # AUC_type='window'/'percentage'\n        score_sorted = -np.sort(-score)\n        \n        P = 
np.sum(labels)\n        # print(np.sum(labels))\n        if AUC_type=='window':\n            labels = self.extend_postive_range(labels, window=window)\n        else:   \n            labels = self.extend_postive_range_individual(labels, percentage=percentage)\n        \n        # print(np.sum(labels))\n        L = self.range_convers_new(labels)\n        TPR_list = [0]\n        FPR_list = [0]\n        Precision_list = [1]\n        \n        for i in np.linspace(0, len(score)-1, 250).astype(int):\n            threshold = score_sorted[i]\n            # print('thre='+str(threshold))\n            pred = score>= threshold\n            TPR, FPR, Precision = self.TPR_FPR_RangeAUC(labels, pred, P,L)\n            \n            TPR_list.append(TPR)\n            FPR_list.append(FPR)\n            Precision_list.append(Precision)\n            \n        TPR_list.append(1)\n        FPR_list.append(1)   # otherwise, range-AUC will stop earlier than (1,1)\n        \n        tpr = np.array(TPR_list)\n        fpr = np.array(FPR_list)\n        prec = np.array(Precision_list)\n        \n        width = fpr[1:] - fpr[:-1]\n        height = (tpr[1:] + tpr[:-1])/2\n        AUC_range = np.sum(width*height)\n        \n        width_PR = tpr[1:-1] - tpr[:-2]\n        height_PR = (prec[1:] + prec[:-1])/2\n        AP_range = np.sum(width_PR*height_PR)\n        \n        if plot_ROC:\n            return AUC_range, AP_range, fpr, tpr, prec\n        \n        return AUC_range\n        \n\n    # TPR_FPR_window\n    def RangeAUC_volume(self, labels_original, score, windowSize):\n        score_sorted = -np.sort(-score)\n        \n        tpr_3d=[]\n        fpr_3d=[]\n        prec_3d=[]\n        \n        auc_3d=[]\n        ap_3d=[]\n        \n        window_3d = np.arange(0, windowSize+1, 1)\n        P = np.sum(labels_original)\n       \n        for window in window_3d:\n            labels = self.extend_postive_range(labels_original, window)\n            \n            # print(np.sum(labels))\n         
   L = self.range_convers_new(labels)\n            TPR_list = [0]\n            FPR_list = [0]\n            Precision_list = [1]\n            \n            for i in np.linspace(0, len(score)-1, 250).astype(int):\n                threshold = score_sorted[i]\n                # print('thre='+str(threshold))\n                pred = score>= threshold\n                TPR, FPR, Precision = self.TPR_FPR_RangeAUC(labels, pred, P,L)\n                \n                TPR_list.append(TPR)\n                FPR_list.append(FPR)\n                Precision_list.append(Precision)\n                \n            TPR_list.append(1)\n            FPR_list.append(1)   # otherwise, range-AUC will stop earlier than (1,1)\n            \n            \n            tpr = np.array(TPR_list)\n            fpr = np.array(FPR_list)\n            prec = np.array(Precision_list)\n            \n            tpr_3d.append(tpr)\n            fpr_3d.append(fpr)\n            prec_3d.append(prec)\n            \n            width = fpr[1:] - fpr[:-1]\n            height = (tpr[1:] + tpr[:-1])/2\n            AUC_range = np.sum(width*height)\n            auc_3d.append(AUC_range)\n            \n            width_PR = tpr[1:-1] - tpr[:-2]\n            height_PR = (prec[1:] + prec[:-1])/2\n            AP_range = np.sum(width_PR*height_PR)\n            ap_3d.append(AP_range)\n\n        \n        return tpr_3d, fpr_3d, prec_3d, window_3d, sum(auc_3d)/len(window_3d), sum(ap_3d)/len(window_3d)\n\n\n\n\ndef generate_curve(label,score,slidingWindow):\n    tpr_3d, fpr_3d, prec_3d, window_3d, avg_auc_3d, avg_ap_3d = metricor().RangeAUC_volume(labels_original=label, score=score, windowSize=1*slidingWindow)\n\n    X = np.array(tpr_3d).reshape(1,-1).ravel()\n    X_ap = np.array(tpr_3d)[:,:-1].reshape(1,-1).ravel()\n    Y = np.array(fpr_3d).reshape(1,-1).ravel()\n    W = np.array(prec_3d).reshape(1,-1).ravel()\n    Z = np.repeat(window_3d, len(tpr_3d[0]))\n    Z_ap = np.repeat(window_3d, len(tpr_3d[0])-1)\n    \n    return Y, 
Z, X, X_ap, W, Z_ap,avg_auc_3d, avg_ap_3d\n        \n        "
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/metrics/vus/utils/slidingWindows.py",
    "content": "from statsmodels.tsa.stattools import acf\nfrom scipy.signal import argrelextrema\nimport numpy as np\n\nimport matplotlib.patches as mpatches \nimport matplotlib.pyplot as plt\n# determine sliding window (period) based on ACF\ndef find_length(data):\n    if len(data.shape)>1:\n        return 0\n    data = data[:min(20000, len(data))]\n    \n    base = 3\n    auto_corr = acf(data, nlags=400, fft=True)[base:]\n    \n    \n    local_max = argrelextrema(auto_corr, np.greater)[0]\n    try:\n        max_local_max = np.argmax([auto_corr[lcm] for lcm in local_max])\n        if local_max[max_local_max]<3 or local_max[max_local_max]>300:\n            return 125\n        return local_max[max_local_max]+base\n    except:\n        return 125"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/models/AnomalyTransformer.py",
    "content": "import numpy as np\nimport math\nfrom math import sqrt\nimport os\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.utils import weight_norm\nimport math\n\n\nclass PositionalEmbedding(nn.Module):\n    def __init__(self, d_model, max_len=5000):\n        super(PositionalEmbedding, self).__init__()\n        # Compute the positional encodings once in log space.\n        pe = torch.zeros(max_len, d_model).float()\n        pe.require_grad = False\n\n        position = torch.arange(0, max_len).float().unsqueeze(1)\n        div_term = (torch.arange(0, d_model, 2).float() * -(math.log(10000.0) / d_model)).exp()\n\n        pe[:, 0::2] = torch.sin(position * div_term)\n        pe[:, 1::2] = torch.cos(position * div_term)\n\n        pe = pe.unsqueeze(0)\n        self.register_buffer('pe', pe)\n\n    def forward(self, x):\n        return self.pe[:, :x.size(1)]\n\n\nclass TokenEmbedding(nn.Module):\n    def __init__(self, c_in, d_model):\n        super(TokenEmbedding, self).__init__()\n        padding = 1 if torch.__version__ >= '1.5.0' else 2\n        self.tokenConv = nn.Conv1d(in_channels=c_in, out_channels=d_model,\n                                   kernel_size=3, padding=padding, padding_mode='circular', bias=False)\n        for m in self.modules():\n            if isinstance(m, nn.Conv1d):\n                nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='leaky_relu')\n\n    def forward(self, x):\n        x = self.tokenConv(x.permute(0, 2, 1)).transpose(1, 2)\n        return x\n\n\nclass DataEmbedding(nn.Module):\n    def __init__(self, c_in, d_model, dropout=0.0):\n        super(DataEmbedding, self).__init__()\n\n        self.value_embedding = TokenEmbedding(c_in=c_in, d_model=d_model)\n        self.position_embedding = PositionalEmbedding(d_model=d_model)\n\n        self.dropout = nn.Dropout(p=dropout)\n\n    def forward(self, x):\n        x = self.value_embedding(x) + self.position_embedding(x)\n        
return self.dropout(x)\n\n\n\nclass TriangularCausalMask():\n    def __init__(self, B, L, device=\"cpu\"):\n        mask_shape = [B, 1, L, L]\n        with torch.no_grad():\n            self._mask = torch.triu(torch.ones(mask_shape, dtype=torch.bool), diagonal=1).to(device)\n\n    @property\n    def mask(self):\n        return self._mask\n\n\nclass AnomalyAttention(nn.Module):\n    def __init__(self, win_size, mask_flag=True, scale=None, attention_dropout=0.0, output_attention=False, cud_device=None):\n        super(AnomalyAttention, self).__init__()\n        self.scale = scale\n        self.mask_flag = mask_flag\n        self.output_attention = output_attention\n        self.dropout = nn.Dropout(attention_dropout)\n        self.device = cud_device\n        window_size = win_size\n        self.distances = torch.zeros((window_size, window_size)).to(self.device)\n        for i in range(window_size):\n            for j in range(window_size):\n                self.distances[i][j] = abs(i - j)\n\n    def forward(self, queries, keys, values, sigma, attn_mask):\n        B, L, H, E = queries.shape\n        _, S, _, D = values.shape\n        scale = self.scale or 1. 
/ sqrt(E)\n\n        scores = torch.einsum(\"blhe,bshe->bhls\", queries, keys)\n        if self.mask_flag:\n            if attn_mask is None:\n                attn_mask = TriangularCausalMask(B, L, device=queries.device)\n            scores.masked_fill_(attn_mask.mask, -np.inf)\n        attn = scale * scores\n\n        sigma = sigma.transpose(1, 2)  # B L H ->  B H L\n        window_size = attn.shape[-1]\n        sigma = torch.sigmoid(sigma * 5) + 1e-5\n        sigma = torch.pow(3, sigma) - 1\n        sigma = sigma.unsqueeze(-1).repeat(1, 1, 1, window_size)  # B H L L\n        prior = self.distances.unsqueeze(0).unsqueeze(0).repeat(sigma.shape[0], sigma.shape[1], 1, 1).to(self.device)\n        prior = 1.0 / (math.sqrt(2 * math.pi) * sigma) * torch.exp(-prior ** 2 / 2 / (sigma ** 2))\n\n        series = self.dropout(torch.softmax(attn, dim=-1))\n        V = torch.einsum(\"bhls,bshd->blhd\", series, values)\n\n        if self.output_attention:\n            return (V.contiguous(), series, prior, sigma)\n        else:\n            return (V.contiguous(), None)\n\n\nclass AttentionLayer(nn.Module):\n    def __init__(self, attention, d_model, n_heads, d_keys=None,\n                 d_values=None):\n        super(AttentionLayer, self).__init__()\n\n        d_keys = d_keys or (d_model // n_heads)\n        d_values = d_values or (d_model // n_heads)\n        self.norm = nn.LayerNorm(d_model)\n        self.inner_attention = attention\n        self.query_projection = nn.Linear(d_model,\n                                          d_keys * n_heads)\n        self.key_projection = nn.Linear(d_model,\n                                        d_keys * n_heads)\n        self.value_projection = nn.Linear(d_model,\n                                          d_values * n_heads)\n        self.sigma_projection = nn.Linear(d_model,\n                                          n_heads)\n        self.out_projection = nn.Linear(d_values * n_heads, d_model)\n\n        self.n_heads = n_heads\n\n    
def forward(self, queries, keys, values, attn_mask):\n        B, L, _ = queries.shape\n        _, S, _ = keys.shape\n        H = self.n_heads\n        x = queries\n        queries = self.query_projection(queries).view(B, L, H, -1)\n        keys = self.key_projection(keys).view(B, S, H, -1)\n        values = self.value_projection(values).view(B, S, H, -1)\n        sigma = self.sigma_projection(x).view(B, L, H)\n\n        out, series, prior, sigma = self.inner_attention(\n            queries,\n            keys,\n            values,\n            sigma,\n            attn_mask\n        )\n        out = out.view(B, L, -1)\n\n        return self.out_projection(out), series, prior, sigma\n\n\n\nclass EncoderLayer(nn.Module):\n    def __init__(self, attention, d_model, d_ff=None, dropout=0.1, activation=\"relu\"):\n        super(EncoderLayer, self).__init__()\n        d_ff = d_ff or 4 * d_model\n        self.attention = attention\n        self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1)\n        self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1)\n        self.norm1 = nn.LayerNorm(d_model)\n        self.norm2 = nn.LayerNorm(d_model)\n        self.dropout = nn.Dropout(dropout)\n        self.activation = F.relu if activation == \"relu\" else F.gelu\n\n    def forward(self, x, attn_mask=None):\n        new_x, attn, mask, sigma = self.attention(\n            x, x, x,\n            attn_mask=attn_mask\n        )\n        x = x + self.dropout(new_x)\n        y = x = self.norm1(x)\n        y = self.dropout(self.activation(self.conv1(y.transpose(-1, 1))))\n        y = self.dropout(self.conv2(y).transpose(-1, 1))\n\n        return self.norm2(x + y), attn, mask, sigma\n\n\nclass Encoder(nn.Module):\n    def __init__(self, attn_layers, norm_layer=None):\n        super(Encoder, self).__init__()\n        self.attn_layers = nn.ModuleList(attn_layers)\n        self.norm = norm_layer\n\n    def forward(self, x, attn_mask=None):\n        
# x [B, L, D]\n        series_list = []\n        prior_list = []\n        sigma_list = []\n        for attn_layer in self.attn_layers:\n            x, series, prior, sigma = attn_layer(x, attn_mask=attn_mask)\n            series_list.append(series)\n            prior_list.append(prior)\n            sigma_list.append(sigma)\n\n        if self.norm is not None:\n            x = self.norm(x)\n\n        return x, series_list, prior_list, sigma_list\n\n\nclass AnomalyTransformer(nn.Module):\n    def __init__(self, win_size, enc_in, c_out, d_model=512, n_heads=8, e_layers=3, d_ff=512,\n                 dropout=0.0, activation='gelu', output_attention=True, cud_device=None):\n        super(AnomalyTransformer, self).__init__()\n        self.output_attention = output_attention\n\n        # Encoding\n        self.embedding = DataEmbedding(enc_in, d_model, dropout)\n\n        # Encoder\n        self.encoder = Encoder(\n            [\n                EncoderLayer(\n                    AttentionLayer(\n                        AnomalyAttention(win_size, False, attention_dropout=dropout, output_attention=output_attention, cud_device=cud_device),\n                        d_model, n_heads),\n                    d_model,\n                    d_ff,\n                    dropout=dropout,\n                    activation=activation\n                ) for l in range(e_layers)\n            ],\n            norm_layer=torch.nn.LayerNorm(d_model)\n        )\n\n        self.projection = nn.Linear(d_model, c_out, bias=True)\n\n    def forward(self, x):\n        enc_out = self.embedding(x)\n        enc_out, series, prior, sigmas = self.encoder(enc_out)\n        enc_out = self.projection(enc_out)\n\n        if self.output_attention:\n            return enc_out, series, prior, sigmas\n        else:\n            return enc_out  # [B, L, D]\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/models/DCdetector.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom einops import rearrange\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport math\nfrom math import sqrt\nimport os\nfrom einops import rearrange, reduce, repeat\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.utils import weight_norm\nimport math\nfrom tkinter import _flatten\n\n\nclass DAC_structure(nn.Module):\n    def __init__(self, win_size, patch_size, channel, mask_flag=True, scale=None, attention_dropout=0.05,\n                 output_attention=False):\n        super(DAC_structure, self).__init__()\n        self.scale = scale\n        self.mask_flag = mask_flag\n        self.output_attention = output_attention\n        self.dropout = nn.Dropout(attention_dropout)\n        self.window_size = win_size\n        self.patch_size = patch_size\n        self.channel = channel\n\n    def forward(self, queries_patch_size, queries_patch_num, keys_patch_size, keys_patch_num, values, patch_index,\n                attn_mask):\n\n        # Patch-wise Representation\n        B, L, H, E = queries_patch_size.shape  # batch_size*channel, patch_num, n_head, d_model/n_head\n        scale_patch_size = self.scale or 1. / sqrt(E)\n        scores_patch_size = torch.einsum(\"blhe,bshe->bhls\", queries_patch_size,\n                                         keys_patch_size)  # batch*ch, nheads, p_num, p_num\n        attn_patch_size = scale_patch_size * scores_patch_size\n        series_patch_size = self.dropout(torch.softmax(attn_patch_size, dim=-1))  # B*D_model H N N\n\n        # In-patch Representation\n        B, L, H, E = queries_patch_num.shape  # batch_size*channel, patch_size, n_head, d_model/n_head\n        scale_patch_num = self.scale or 1. 
/ sqrt(E)\n        scores_patch_num = torch.einsum(\"blhe,bshe->bhls\", queries_patch_num,\n                                        keys_patch_num)  # batch*ch, nheads, p_size, p_size\n        attn_patch_num = scale_patch_num * scores_patch_num\n        series_patch_num = self.dropout(torch.softmax(attn_patch_num, dim=-1))  # B*D_model H S S\n\n        # Upsampling\n        series_patch_size = repeat(series_patch_size, 'b l m n -> b l (m repeat_m) (n repeat_n)',\n                                   repeat_m=self.patch_size[patch_index], repeat_n=self.patch_size[patch_index])\n        series_patch_num = series_patch_num.repeat(1, 1, self.window_size // self.patch_size[patch_index],\n                                                   self.window_size // self.patch_size[patch_index])\n        series_patch_size = reduce(series_patch_size, '(b reduce_b) l m n-> b l m n', 'mean', reduce_b=self.channel)\n        series_patch_num = reduce(series_patch_num, '(b reduce_b) l m n-> b l m n', 'mean', reduce_b=self.channel)\n\n        if self.output_attention:\n            return series_patch_size, series_patch_num\n        else:\n            return (None)\n\n\nclass AttentionLayer(nn.Module):\n    def __init__(self, attention, d_model, patch_size, channel, n_heads, win_size, d_keys=None, d_values=None):\n        super(AttentionLayer, self).__init__()\n\n        d_keys = d_keys or (d_model // n_heads)\n        d_values = d_values or (d_model // n_heads)\n        self.norm = nn.LayerNorm(d_model)\n        self.inner_attention = attention\n        self.patch_size = patch_size\n        self.channel = channel\n        self.window_size = win_size\n        self.n_heads = n_heads\n\n        self.patch_query_projection = nn.Linear(d_model, d_keys * n_heads)\n        self.patch_key_projection = nn.Linear(d_model, d_keys * n_heads)\n        self.out_projection = nn.Linear(d_values * n_heads, d_model)\n        self.value_projection = nn.Linear(d_model, d_values * n_heads)\n\n    def 
forward(self, x_patch_size, x_patch_num, x_ori, patch_index, attn_mask):\n        # patch_size\n        B, L, M = x_patch_size.shape\n        H = self.n_heads\n        queries_patch_size, keys_patch_size = x_patch_size, x_patch_size\n        queries_patch_size = self.patch_query_projection(queries_patch_size).view(B, L, H, -1)\n        keys_patch_size = self.patch_key_projection(keys_patch_size).view(B, L, H, -1)\n\n        # patch_num\n        B, L, M = x_patch_num.shape\n        queries_patch_num, keys_patch_num = x_patch_num, x_patch_num\n        queries_patch_num = self.patch_query_projection(queries_patch_num).view(B, L, H, -1)\n        keys_patch_num = self.patch_key_projection(keys_patch_num).view(B, L, H, -1)\n\n        # x_ori\n        B, L, _ = x_ori.shape\n        values = self.value_projection(x_ori).view(B, L, H, -1)\n\n        series, prior = self.inner_attention(\n            queries_patch_size, queries_patch_num,\n            keys_patch_size, keys_patch_num,\n            values, patch_index,\n            attn_mask\n        )\n\n        return series, prior\n\n\nclass PositionalEmbedding(nn.Module):\n    def __init__(self, d_model, max_len=5000):\n        super(PositionalEmbedding, self).__init__()\n        # Compute the positional encodings once in log space.\n        pe = torch.zeros(max_len, d_model).float()\n        pe.require_grad = False\n\n        position = torch.arange(0, max_len).float().unsqueeze(1)\n        div_term = (torch.arange(0, d_model, 2).float() * -(math.log(10000.0) / d_model)).exp()\n\n        pe[:, 0::2] = torch.sin(position * div_term)\n        pe[:, 1::2] = torch.cos(position * div_term)\n\n        pe = pe.unsqueeze(0)\n        self.register_buffer('pe', pe)\n\n    def forward(self, x):\n        return self.pe[:, :x.size(1)]\n\n\nclass TokenEmbedding(nn.Module):\n    def __init__(self, c_in, d_model):\n        super(TokenEmbedding, self).__init__()\n        padding = 1 if torch.__version__ >= '1.5.0' else 2\n        
self.tokenConv = nn.Conv1d(in_channels=c_in, out_channels=d_model,\n                                   kernel_size=3, padding=padding, padding_mode='circular', bias=False)\n        for m in self.modules():\n            if isinstance(m, nn.Conv1d):\n                nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='leaky_relu')\n\n    def forward(self, x):\n        x = self.tokenConv(x.permute(0, 2, 1)).transpose(1, 2)\n        return x\n\n\nclass DataEmbedding(nn.Module):\n    def __init__(self, c_in, d_model, dropout=0.05):\n        super(DataEmbedding, self).__init__()\n\n        self.value_embedding = TokenEmbedding(c_in=c_in, d_model=d_model)\n        self.position_embedding = PositionalEmbedding(d_model=d_model)\n\n        self.dropout = nn.Dropout(p=dropout)\n\n    def forward(self, x):\n        x = self.value_embedding(x) + self.position_embedding(x)\n        return self.dropout(x)\n\n\nclass RevIN(nn.Module):\n    def __init__(self, num_features: int, eps=1e-5, affine=True):\n        \"\"\"\n        :param num_features: the number of features or channels\n        :param eps: a value added for numerical stability\n        :param affine: if True, RevIN has learnable affine parameters\n        \"\"\"\n        super(RevIN, self).__init__()\n        self.num_features = num_features\n        self.eps = eps\n        self.affine = affine\n        if self.affine:\n            self._init_params()\n\n    def forward(self, x, mode: str):\n        if mode == 'norm':\n            self._get_statistics(x)\n            x = self._normalize(x)\n        elif mode == 'denorm':\n            x = self._denormalize(x)\n        else:\n            raise NotImplementedError\n        return x\n\n    def _init_params(self):\n        # initialize RevIN params: (C,)\n        self.affine_weight = torch.ones(self.num_features)\n        self.affine_bias = torch.zeros(self.num_features)\n        self.affine_weight = self.affine_weight.to(\n            device=torch.device('cuda:0' 
if torch.cuda.is_available() else 'cpu'))\n        self.affine_bias = self.affine_bias.to(device=torch.device('cuda:0' if torch.cuda.is_available() else 'cpu'))\n\n    def _get_statistics(self, x):\n        dim2reduce = tuple(range(1, x.ndim - 1))\n        self.mean = torch.mean(x, dim=dim2reduce, keepdim=True).detach()\n        self.stdev = torch.sqrt(torch.var(x, dim=dim2reduce, keepdim=True, unbiased=False) + self.eps).detach()\n\n    def _normalize(self, x):\n        x = x - self.mean\n        x = x / self.stdev\n        if self.affine:\n            x = x * self.affine_weight\n            x = x + self.affine_bias\n        return x\n\n    def _denormalize(self, x):\n        if self.affine:\n            x = x - self.affine_bias\n            x = x / (self.affine_weight + self.eps * self.eps)\n        x = x * self.stdev\n        x = x + self.mean\n        return x\n\n\nclass Encoder(nn.Module):\n    def __init__(self, attn_layers, norm_layer=None):\n        super(Encoder, self).__init__()\n        self.attn_layers = nn.ModuleList(attn_layers)\n        self.norm = norm_layer\n\n    def forward(self, x_patch_size, x_patch_num, x_ori, patch_index, attn_mask=None):\n        series_list = []\n        prior_list = []\n        for attn_layer in self.attn_layers:\n            series, prior = attn_layer(x_patch_size, x_patch_num, x_ori, patch_index, attn_mask=attn_mask)\n            series_list.append(series)\n            prior_list.append(prior)\n        return series_list, prior_list\n\n\nclass DCdetector(nn.Module):\n    def __init__(self, win_size, enc_in, c_out, n_heads=1, d_model=256, e_layers=3, patch_size=[3, 5, 7], channel=55,\n                 d_ff=512, dropout=0.0, activation='gelu', output_attention=True):\n        super(DCdetector, self).__init__()\n        self.output_attention = output_attention\n        self.patch_size = patch_size\n        self.channel = channel\n        self.win_size = win_size\n\n        # Patching List\n        self.embedding_patch_size 
= nn.ModuleList()\n        self.embedding_patch_num = nn.ModuleList()\n        for i, patchsize in enumerate(self.patch_size):\n            self.embedding_patch_size.append(DataEmbedding(patchsize, d_model, dropout))\n            self.embedding_patch_num.append(DataEmbedding(self.win_size // patchsize, d_model, dropout))\n\n        self.embedding_window_size = DataEmbedding(enc_in, d_model, dropout)\n\n        # Dual Attention Encoder\n        self.encoder = Encoder(\n            [\n                AttentionLayer(\n                    DAC_structure(win_size, patch_size, channel, False, attention_dropout=dropout,\n                                  output_attention=output_attention),\n                    d_model, patch_size, channel, n_heads, win_size) for l in range(e_layers)\n            ],\n            norm_layer=torch.nn.LayerNorm(d_model)\n        )\n\n        self.projection = nn.Linear(d_model, c_out, bias=True)\n\n    def forward(self, x):\n        B, L, M = x.shape  # Batch win_size channel\n        series_patch_mean = []\n        prior_patch_mean = []\n        revin_layer = RevIN(num_features=M)\n\n        # Instance Normalization Operation\n        x = revin_layer(x, 'norm')\n        x_ori = self.embedding_window_size(x)\n\n        # Mutil-scale Patching Operation\n        for patch_index, patchsize in enumerate(self.patch_size):\n            x_patch_size, x_patch_num = x, x\n            x_patch_size = rearrange(x_patch_size, 'b l m -> b m l')  # Batch channel win_size\n            x_patch_num = rearrange(x_patch_num, 'b l m -> b m l')  # Batch channel win_size\n\n            x_patch_size = rearrange(x_patch_size, 'b m (n p) -> (b m) n p', p=patchsize)\n            x_patch_size = self.embedding_patch_size[patch_index](x_patch_size)\n            x_patch_num = rearrange(x_patch_num, 'b m (p n) -> (b m) p n', p=patchsize)\n            x_patch_num = self.embedding_patch_num[patch_index](x_patch_num)\n\n            series, prior = self.encoder(x_patch_size, 
x_patch_num, x_ori, patch_index)\n            series_patch_mean.append(series), prior_patch_mean.append(prior)\n\n        series_patch_mean = list(_flatten(series_patch_mean))\n        prior_patch_mean = list(_flatten(prior_patch_mean))\n\n        if self.output_attention:\n            return series_patch_mean, prior_patch_mean\n        else:\n            return None\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/models/GPT4TS.py",
    "content": "import torch.nn.functional as F\n\nfrom transformers.models.gpt2.modeling_gpt2 import GPT2Model\nfrom einops import rearrange\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport math\n\n\nclass PositionalEmbedding(nn.Module):\n    def __init__(self, d_model, max_len=5000):\n        super(PositionalEmbedding, self).__init__()\n        # Compute the positional encodings once in log space.\n        pe = torch.zeros(max_len, d_model).float()\n        pe.require_grad = False\n\n        position = torch.arange(0, max_len).float().unsqueeze(1)\n        div_term = (torch.arange(0, d_model, 2).float()\n                    * -(math.log(10000.0) / d_model)).exp()\n\n        pe[:, 0::2] = torch.sin(position * div_term)\n        pe[:, 1::2] = torch.cos(position * div_term)\n\n        pe = pe.unsqueeze(0)\n        self.register_buffer('pe', pe)\n\n    def forward(self, x):\n        return self.pe[:, :x.size(1)]\n\n\nclass TokenEmbedding(nn.Module):\n    def __init__(self, c_in, d_model):\n        super(TokenEmbedding, self).__init__()\n        padding = 1 if torch.__version__ >= '1.5.0' else 2\n        self.tokenConv = nn.Conv1d(in_channels=c_in, out_channels=d_model,\n                                   kernel_size=3, padding=padding, padding_mode='circular', bias=False)\n        for m in self.modules():\n            if isinstance(m, nn.Conv1d):\n                nn.init.kaiming_normal_(\n                    m.weight, mode='fan_in', nonlinearity='leaky_relu')\n\n    def forward(self, x):\n        x = self.tokenConv(x.permute(0, 2, 1)).transpose(1, 2)\n        return x\n\n\nclass FixedEmbedding(nn.Module):\n    def __init__(self, c_in, d_model):\n        super(FixedEmbedding, self).__init__()\n\n        w = torch.zeros(c_in, d_model).float()\n        w.require_grad = False\n\n        position = torch.arange(0, c_in).float().unsqueeze(1)\n        div_term = (torch.arange(0, d_model, 2).float()\n                    * -(math.log(10000.0) / 
d_model)).exp()\n\n        w[:, 0::2] = torch.sin(position * div_term)\n        w[:, 1::2] = torch.cos(position * div_term)\n\n        self.emb = nn.Embedding(c_in, d_model)\n        self.emb.weight = nn.Parameter(w, requires_grad=False)\n\n    def forward(self, x):\n        return self.emb(x).detach()\n\n\nclass TemporalEmbedding(nn.Module):\n    def __init__(self, d_model, embed_type='fixed', freq='h'):\n        super(TemporalEmbedding, self).__init__()\n\n        minute_size = 4\n        hour_size = 24\n        weekday_size = 7\n        day_size = 32\n        month_size = 13\n\n        Embed = FixedEmbedding if embed_type == 'fixed' else nn.Embedding\n        if freq == 't':\n            self.minute_embed = Embed(minute_size, d_model)\n        self.hour_embed = Embed(hour_size, d_model)\n        self.weekday_embed = Embed(weekday_size, d_model)\n        self.day_embed = Embed(day_size, d_model)\n        self.month_embed = Embed(month_size, d_model)\n\n    def forward(self, x):\n        x = x.long()\n        minute_x = self.minute_embed(x[:, :, 4]) if hasattr(\n            self, 'minute_embed') else 0.\n        hour_x = self.hour_embed(x[:, :, 3])\n        weekday_x = self.weekday_embed(x[:, :, 2])\n        day_x = self.day_embed(x[:, :, 1])\n        month_x = self.month_embed(x[:, :, 0])\n\n        return hour_x + weekday_x + day_x + month_x + minute_x\n\n\nclass TimeFeatureEmbedding(nn.Module):\n    def __init__(self, d_model, embed_type='timeF', freq='h'):\n        super(TimeFeatureEmbedding, self).__init__()\n\n        freq_map = {'h': 4, 't': 5, 's': 6,\n                    'm': 1, 'a': 1, 'w': 2, 'd': 3, 'b': 3}\n        d_inp = freq_map[freq]\n        self.embed = nn.Linear(d_inp, d_model, bias=False)\n\n    def forward(self, x):\n        return self.embed(x)\n\n\nclass DataEmbedding(nn.Module):\n    def __init__(self, c_in, d_model, embed_type='fixed', freq='h', dropout=0.1):\n        super(DataEmbedding, self).__init__()\n\n        self.value_embedding = 
TokenEmbedding(c_in=c_in, d_model=d_model)\n        self.position_embedding = PositionalEmbedding(d_model=d_model)\n        self.temporal_embedding = TemporalEmbedding(d_model=d_model, embed_type=embed_type,\n                                                    freq=freq) if embed_type != 'timeF' else TimeFeatureEmbedding(\n            d_model=d_model, embed_type=embed_type, freq=freq)\n        self.dropout = nn.Dropout(p=dropout)\n\n    def forward(self, x, x_mark):\n        if x_mark is None:\n            x = self.value_embedding(x) + self.position_embedding(x)\n        else:\n            x = self.value_embedding(\n                x) + self.temporal_embedding(x_mark) + self.position_embedding(x)\n        return self.dropout(x)\n\n\nclass DataEmbedding_wo_pos(nn.Module):\n    def __init__(self, c_in, d_model, embed_type='fixed', freq='h', dropout=0.1):\n        super(DataEmbedding_wo_pos, self).__init__()\n\n        self.value_embedding = TokenEmbedding(c_in=c_in, d_model=d_model)\n        self.position_embedding = PositionalEmbedding(d_model=d_model)\n        self.temporal_embedding = TemporalEmbedding(d_model=d_model, embed_type=embed_type,\n                                                    freq=freq) if embed_type != 'timeF' else TimeFeatureEmbedding(\n            d_model=d_model, embed_type=embed_type, freq=freq)\n        self.dropout = nn.Dropout(p=dropout)\n\n    def forward(self, x, x_mark):\n        if x_mark is None:\n            x = self.value_embedding(x)\n        else:\n            x = self.value_embedding(x) + self.temporal_embedding(x_mark)\n        return self.dropout(x)\n\nclass Model(nn.Module):\n\n    def __init__(self, configs):\n        super(Model, self).__init__()\n        self.is_ln = configs.ln\n        self.task_name = configs.task_name\n        self.pred_len = configs.pred_len\n        self.seq_len = configs.seq_len\n        self.patch_size = configs.patch_size\n        self.stride = configs.stride\n        self.seq_len = 
configs.seq_len\n        self.d_ff = configs.d_ff\n        self.patch_num = (configs.seq_len + self.pred_len - self.patch_size) // self.stride + 1\n\n        self.padding_patch_layer = nn.ReplicationPad1d((0, self.stride))\n        self.patch_num += 1\n        self.enc_embedding = DataEmbedding(configs.enc_in * self.patch_size, configs.d_model, configs.embed,\n                                           configs.freq,\n                                           configs.dropout)\n\n        # self.gpt2 = GPT2Model.from_pretrained('gpt2', output_attentions=True, output_hidden_states=True)\n        import os\n        # 检查路径是否存在，如果不存在则赋值为新的路径\n        if not os.path.exists(\"/dev_data/lz/gpt2\"):\n            self.gpt2 = GPT2Model.from_pretrained('/SSD/lz/gpt2', output_attentions=True, output_hidden_states=True)\n        else:\n            self.gpt2 = GPT2Model.from_pretrained('/dev_data/lz/gpt2', output_attentions=True,\n                                                  output_hidden_states=True)\n        self.gpt2.h = self.gpt2.h[:configs.gpt_layers]\n\n        for i, (name, param) in enumerate(self.gpt2.named_parameters()):\n            if 'ln' in name or 'wpe' in name:  # or 'mlp' in name:\n                param.requires_grad = True\n            elif 'mlp' in name and configs.mlp == 1:\n                param.requires_grad = True\n            else:\n                param.requires_grad = False\n\n        if configs.use_gpu:\n            device = torch.device('cuda:{}'.format(0))\n            self.gpt2.to(device=device)\n\n        # self.in_layer = nn.Linear(configs.patch_size, configs.d_model)\n\n        if self.task_name == 'long_term_forecast' or self.task_name == 'short_term_forecast':\n            self.predict_linear_pre = nn.Linear(self.seq_len, self.pred_len + self.seq_len)\n            self.predict_linear = nn.Linear(self.patch_size, configs.enc_in)\n            self.ln = nn.LayerNorm(configs.d_ff)\n            self.out_layer = nn.Linear(configs.d_ff, 
configs.c_out)\n        if self.task_name == 'imputation':\n            self.ln_proj = nn.LayerNorm(configs.d_model)\n            self.out_layer = nn.Linear(\n                configs.d_model,\n                configs.c_out,\n                bias=True)\n        if self.task_name == 'anomaly_detection':\n            self.ln_proj = nn.LayerNorm(configs.d_ff)\n            self.out_layer = nn.Linear(\n                configs.d_ff,\n                configs.c_out,\n                bias=True)\n        if self.task_name == 'classification':\n            self.act = F.gelu\n            self.dropout = nn.Dropout(0.1)\n            self.ln_proj = nn.LayerNorm(configs.d_model * self.patch_num)\n            self.out_layer = nn.Linear(configs.d_model * self.patch_num, configs.num_class)\n\n    def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask=None):\n        if self.task_name == 'long_term_forecast' or self.task_name == 'short_term_forecast':\n            dec_out = self.forecast(x_enc, x_mark_enc, x_dec, x_mark_dec)\n            return dec_out[:, -self.pred_len:, :]  # [B, L, D]\n        if self.task_name == 'imputation':\n            dec_out = self.imputation(\n                x_enc, x_mark_enc, x_dec, x_mark_dec, mask)\n            return dec_out  # [B, L, D]\n        if self.task_name == 'anomaly_detection':\n            dec_out = self.anomaly_detection(x_enc)\n            return dec_out  # [B, L, D]\n        if self.task_name == 'classification':\n            dec_out = self.classification(x_enc, x_mark_enc)\n            return dec_out  # [B, N]\n        return None\n\n    def imputation(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask):\n        B, L, M = x_enc.shape\n        # Normalization from Non-stationary Transformer\n        means = torch.sum(x_enc, dim=1) / torch.sum(mask == 1, dim=1)\n        means = means.unsqueeze(1).detach()\n        x_enc = x_enc - means\n        x_enc = x_enc.masked_fill(mask == 0, 0)\n        stdev = torch.sqrt(torch.sum(x_enc * x_enc, 
dim=1) /\n                           torch.sum(mask == 1, dim=1) + 1e-5)\n        stdev = stdev.unsqueeze(1).detach()\n        x_enc /= stdev\n\n        enc_out = self.enc_embedding(x_enc, x_mark_enc)  # [B,T,C]\n\n        outputs = self.gpt2(inputs_embeds=enc_out).last_hidden_state\n\n        outputs = self.ln_proj(outputs)\n        dec_out = self.out_layer(outputs)\n\n        # De-Normalization from Non-stationary Transformer\n        dec_out = dec_out * \\\n                  (stdev[:, 0, :].unsqueeze(1).repeat(\n                      1, self.pred_len + self.seq_len, 1))\n        dec_out = dec_out + \\\n                  (means[:, 0, :].unsqueeze(1).repeat(\n                      1, self.pred_len + self.seq_len, 1))\n        return dec_out\n\n    def forecast(self, x_enc, x_mark_enc, x_dec, x_mark_dec):\n        B, L, M = x_enc.shape\n\n        # Normalization from Non-stationary Transformer\n        means = x_enc.mean(1, keepdim=True).detach()\n        x_enc = x_enc - means\n        stdev = torch.sqrt(\n            torch.var(x_enc, dim=1, keepdim=True, unbiased=False) + 1e-5)\n        x_enc /= stdev\n\n        # embedding\n        enc_out = self.enc_embedding(x_enc, x_mark_enc)  # [B,T,C]\n        enc_out = self.predict_linear_pre(enc_out.permute(0, 2, 1)).permute(\n            0, 2, 1)  # align temporal dimension\n        enc_out = torch.nn.functional.pad(enc_out, (0, 768 - enc_out.shape[-1]))\n\n        # enc_out = rearrange(enc_out, 'b l m -> b m l')\n        # enc_out = self.padding_patch_layer(enc_out)\n        # enc_out = enc_out.unfold(dimension=-1, size=self.patch_size, step=self.stride)\n        # enc_out = self.predict_linear(enc_out)\n        # enc_out = rearrange(enc_out, 'b m n p -> b n (m p)')\n\n        dec_out = self.gpt2(inputs_embeds=enc_out).last_hidden_state\n        dec_out = dec_out[:, :, :self.d_ff]\n        # dec_out = dec_out.reshape(B, -1)\n\n        # dec_out = self.ln(dec_out)\n        dec_out = self.out_layer(dec_out)\n        # 
print(dec_out.shape)\n        # dec_out = dec_out.reshape(B, self.pred_len + self.seq_len, -1)\n\n        # De-Normalization from Non-stationary Transformer\n        dec_out = dec_out * \\\n                  (stdev[:, 0, :].unsqueeze(1).repeat(\n                      1, self.pred_len + self.seq_len, 1))\n        dec_out = dec_out + \\\n                  (means[:, 0, :].unsqueeze(1).repeat(\n                      1, self.pred_len + self.seq_len, 1))\n\n        return dec_out\n\n    def anomaly_detection(self, x_enc):\n        B, L, M = x_enc.shape\n\n        # Normalization from Non-stationary Transformer\n\n        seg_num = 25\n        x_enc = rearrange(x_enc, 'b (n s) m -> b n s m', s=seg_num)\n        means = x_enc.mean(2, keepdim=True).detach()\n        x_enc = x_enc - means\n        stdev = torch.sqrt(\n            torch.var(x_enc, dim=2, keepdim=True, unbiased=False) + 1e-5)\n        x_enc /= stdev\n        x_enc = rearrange(x_enc, 'b n s m -> b (n s) m')\n\n        # means = x_enc.mean(1, keepdim=True).detach()\n        # x_enc = x_enc - means\n        # stdev = torch.sqrt(\n        #     torch.var(x_enc, dim=1, keepdim=True, unbiased=False) + 1e-5)\n        # x_enc /= stdev\n\n        # enc_out = self.enc_embedding(x_enc, None)  # [B,T,C]\n        enc_out = torch.nn.functional.pad(x_enc, (0, 768 - x_enc.shape[-1]))\n\n        outputs = self.gpt2(inputs_embeds=enc_out).last_hidden_state\n\n        outputs = outputs[:, :, :self.d_ff]\n        # outputs = self.ln_proj(outputs)\n        dec_out = self.out_layer(outputs)\n\n        # De-Normalization from Non-stationary Transformer\n\n        dec_out = rearrange(dec_out, 'b (n s) m -> b n s m', s=seg_num)\n        dec_out = dec_out * \\\n                  (stdev[:, :, 0, :].unsqueeze(2).repeat(\n                      1, 1, seg_num, 1))\n        dec_out = dec_out + \\\n                  (means[:, :, 0, :].unsqueeze(2).repeat(\n                      1, 1, seg_num, 1))\n        dec_out = rearrange(dec_out, 'b n s m 
-> b (n s) m')\n\n        # dec_out = dec_out * \\\n        #           (stdev[:, 0, :].unsqueeze(1).repeat(\n        #               1, self.pred_len + self.seq_len, 1))\n        # dec_out = dec_out + \\\n        #           (means[:, 0, :].unsqueeze(1).repeat(\n        #               1, self.pred_len + self.seq_len, 1))\n        return dec_out\n\n    def classification(self, x_enc, x_mark_enc):\n        # print(x_enc.shape)\n        B, L, M = x_enc.shape\n        input_x = rearrange(x_enc, 'b l m -> b m l')\n        input_x = self.padding_patch_layer(input_x)\n        input_x = input_x.unfold(dimension=-1, size=self.patch_size, step=self.stride)\n        input_x = rearrange(input_x, 'b m n p -> b n (p m)')\n\n        outputs = self.enc_embedding(input_x, None)\n\n        outputs = self.gpt2(inputs_embeds=outputs).last_hidden_state\n\n        outputs = self.act(outputs).reshape(B, -1)\n        outputs = self.ln_proj(outputs)\n        # outputs = self.dropout(outputs)\n        outputs = self.out_layer(outputs)\n\n        return outputs\n\n\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/models/TimesNet.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.fft\nimport math\n\n\nclass Inception_Block_V1(nn.Module):\n    def __init__(self, in_channels, out_channels, num_kernels=6, init_weight=True):\n        super(Inception_Block_V1, self).__init__()\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n        self.num_kernels = num_kernels\n        kernels = []\n        for i in range(self.num_kernels):\n            kernels.append(nn.Conv2d(in_channels, out_channels, kernel_size=2 * i + 1, padding=i))\n        self.kernels = nn.ModuleList(kernels)\n        if init_weight:\n            self._initialize_weights()\n\n    def _initialize_weights(self):\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n                if m.bias is not None:\n                    nn.init.constant_(m.bias, 0)\n\n    def forward(self, x):\n        res_list = []\n        for i in range(self.num_kernels):\n            res_list.append(self.kernels[i](x))\n        res = torch.stack(res_list, dim=-1).mean(-1)\n        return res\n\n\nclass PositionalEmbedding(nn.Module):\n    def __init__(self, d_model, max_len=5000):\n        super(PositionalEmbedding, self).__init__()\n        # Compute the positional encodings once in log space.\n        pe = torch.zeros(max_len, d_model).float()\n        pe.require_grad = False\n\n        position = torch.arange(0, max_len).float().unsqueeze(1)\n        div_term = (torch.arange(0, d_model, 2).float()\n                    * -(math.log(10000.0) / d_model)).exp()\n\n        pe[:, 0::2] = torch.sin(position * div_term)\n        pe[:, 1::2] = torch.cos(position * div_term)\n\n        pe = pe.unsqueeze(0)\n        self.register_buffer('pe', pe)\n\n    def forward(self, x):\n        return self.pe[:, :x.size(1)]\n\n\nclass TokenEmbedding(nn.Module):\n    def __init__(self, c_in, 
d_model):\n        super(TokenEmbedding, self).__init__()\n        padding = 1 if torch.__version__ >= '1.5.0' else 2\n        self.tokenConv = nn.Conv1d(in_channels=c_in, out_channels=d_model,\n                                   kernel_size=3, padding=padding, padding_mode='circular', bias=False)\n        for m in self.modules():\n            if isinstance(m, nn.Conv1d):\n                nn.init.kaiming_normal_(\n                    m.weight, mode='fan_in', nonlinearity='leaky_relu')\n\n    def forward(self, x):\n        x = self.tokenConv(x.permute(0, 2, 1)).transpose(1, 2)\n        return x\n\n\nclass FixedEmbedding(nn.Module):\n    def __init__(self, c_in, d_model):\n        super(FixedEmbedding, self).__init__()\n\n        w = torch.zeros(c_in, d_model).float()\n        w.require_grad = False\n\n        position = torch.arange(0, c_in).float().unsqueeze(1)\n        div_term = (torch.arange(0, d_model, 2).float()\n                    * -(math.log(10000.0) / d_model)).exp()\n\n        w[:, 0::2] = torch.sin(position * div_term)\n        w[:, 1::2] = torch.cos(position * div_term)\n\n        self.emb = nn.Embedding(c_in, d_model)\n        self.emb.weight = nn.Parameter(w, requires_grad=False)\n\n    def forward(self, x):\n        return self.emb(x).detach()\n\n\nclass TemporalEmbedding(nn.Module):\n    def __init__(self, d_model, embed_type='fixed', freq='h'):\n        super(TemporalEmbedding, self).__init__()\n\n        minute_size = 4\n        hour_size = 24\n        weekday_size = 7\n        day_size = 32\n        month_size = 13\n\n        Embed = FixedEmbedding if embed_type == 'fixed' else nn.Embedding\n        if freq == 't':\n            self.minute_embed = Embed(minute_size, d_model)\n        self.hour_embed = Embed(hour_size, d_model)\n        self.weekday_embed = Embed(weekday_size, d_model)\n        self.day_embed = Embed(day_size, d_model)\n        self.month_embed = Embed(month_size, d_model)\n\n    def forward(self, x):\n        x = x.long()\n      
  minute_x = self.minute_embed(x[:, :, 4]) if hasattr(\n            self, 'minute_embed') else 0.\n        hour_x = self.hour_embed(x[:, :, 3])\n        weekday_x = self.weekday_embed(x[:, :, 2])\n        day_x = self.day_embed(x[:, :, 1])\n        month_x = self.month_embed(x[:, :, 0])\n\n        return hour_x + weekday_x + day_x + month_x + minute_x\n\n\nclass TimeFeatureEmbedding(nn.Module):\n    def __init__(self, d_model, embed_type='timeF', freq='h'):\n        super(TimeFeatureEmbedding, self).__init__()\n\n        freq_map = {'h': 4, 't': 5, 's': 6,\n                    'm': 1, 'a': 1, 'w': 2, 'd': 3, 'b': 3}\n        d_inp = freq_map[freq]\n        self.embed = nn.Linear(d_inp, d_model, bias=False)\n\n    def forward(self, x):\n        return self.embed(x)\n\n\nclass DataEmbedding(nn.Module):\n    def __init__(self, c_in, d_model, embed_type='fixed', freq='h', dropout=0.1):\n        super(DataEmbedding, self).__init__()\n\n        self.value_embedding = TokenEmbedding(c_in=c_in, d_model=d_model)\n        self.position_embedding = PositionalEmbedding(d_model=d_model)\n        self.temporal_embedding = TemporalEmbedding(d_model=d_model, embed_type=embed_type,\n                                                    freq=freq) if embed_type != 'timeF' else TimeFeatureEmbedding(\n            d_model=d_model, embed_type=embed_type, freq=freq)\n        self.dropout = nn.Dropout(p=dropout)\n\n    def forward(self, x, x_mark):\n        if x_mark is None:\n            x = self.value_embedding(x) + self.position_embedding(x)\n        else:\n            x = self.value_embedding(\n                x) + self.temporal_embedding(x_mark) + self.position_embedding(x)\n        return self.dropout(x)\n\n\n\ndef FFT_for_Period(x, k=2):\n    # [B, T, C]\n    xf = torch.fft.rfft(x, dim=1)\n    # find period by amplitudes\n    frequency_list = abs(xf).mean(0).mean(-1)\n    frequency_list[0] = 0\n    _, top_list = torch.topk(frequency_list, k)\n    top_list = 
top_list.detach().cpu().numpy()\n    period = x.shape[1] // top_list\n    return period, abs(xf).mean(-1)[:, top_list]\n\n\nclass TimesBlock(nn.Module):\n    def __init__(self, configs):\n        super(TimesBlock, self).__init__()\n        self.seq_len = configs.seq_len\n        self.pred_len = configs.pred_len\n        self.k = configs.top_k\n        # parameter-efficient design\n        self.conv = nn.Sequential(\n            Inception_Block_V1(configs.d_model, configs.d_ff,\n                               num_kernels=configs.num_kernels),\n            nn.GELU(),\n            Inception_Block_V1(configs.d_ff, configs.d_model,\n                               num_kernels=configs.num_kernels)\n        )\n\n    def forward(self, x):\n        B, T, N = x.size()\n        period_list, period_weight = FFT_for_Period(x, self.k)\n\n        res = []\n        for i in range(self.k):\n            period = period_list[i]\n            # padding\n            if (self.seq_len + self.pred_len) % period != 0:\n                length = (\n                                 ((self.seq_len + self.pred_len) // period) + 1) * period\n                padding = torch.zeros([x.shape[0], (length - (self.seq_len + self.pred_len)), x.shape[2]]).to(x.device)\n                out = torch.cat([x, padding], dim=1)\n            else:\n                length = (self.seq_len + self.pred_len)\n                out = x\n            # reshape\n            out = out.reshape(B, length // period, period,\n                              N).permute(0, 3, 1, 2).contiguous()\n            # 2D conv: from 1d Variation to 2d Variation\n            out = self.conv(out)\n            # reshape back\n            out = out.permute(0, 2, 3, 1).reshape(B, -1, N)\n            res.append(out[:, :(self.seq_len + self.pred_len), :])\n        res = torch.stack(res, dim=-1)\n        # adaptive aggregation\n        period_weight = F.softmax(period_weight, dim=1)\n        period_weight = period_weight.unsqueeze(\n            
1).unsqueeze(1).repeat(1, T, N, 1)\n        res = torch.sum(res * period_weight, -1)\n        # residual connection\n        res = res + x\n        return res\n\n\nclass Model(nn.Module):\n    \"\"\"\n    Paper link: https://openreview.net/pdf?id=ju_Uqw384Oq\n    \"\"\"\n\n    def __init__(self, configs):\n        super(Model, self).__init__()\n        self.configs = configs\n        self.task_name = configs.task_name\n        self.seq_len = configs.seq_len\n        self.label_len = configs.label_len\n        self.pred_len = configs.pred_len\n        self.model = nn.ModuleList([TimesBlock(configs)\n                                    for _ in range(configs.e_layers)])\n        self.enc_embedding = DataEmbedding(configs.enc_in, configs.d_model, configs.embed, configs.freq,\n                                           configs.dropout)\n        self.layer = configs.e_layers\n        self.layer_norm = nn.LayerNorm(configs.d_model)\n        if self.task_name == 'long_term_forecast' or self.task_name == 'short_term_forecast':\n            self.predict_linear = nn.Linear(\n                self.seq_len, self.pred_len + self.seq_len)\n            self.projection = nn.Linear(\n                configs.d_model, configs.c_out, bias=True)\n        if self.task_name == 'imputation' or self.task_name == 'anomaly_detection':\n            self.projection = nn.Linear(\n                configs.d_model, configs.c_out, bias=True)\n        if self.task_name == 'classification':\n            self.act = F.gelu\n            self.dropout = nn.Dropout(configs.dropout)\n            self.projection = nn.Linear(\n                configs.d_model * configs.seq_len, configs.num_class)\n\n    def forecast(self, x_enc, x_mark_enc, x_dec, x_mark_dec):\n        # Normalization from Non-stationary Transformer\n        means = x_enc.mean(1, keepdim=True).detach()\n        x_enc = x_enc - means\n        stdev = torch.sqrt(\n            torch.var(x_enc, dim=1, keepdim=True, unbiased=False) + 1e-5)\n        
x_enc /= stdev\n\n        # embedding\n        enc_out = self.enc_embedding(x_enc, x_mark_enc)  # [B,T,C]\n        enc_out = self.predict_linear(enc_out.permute(0, 2, 1)).permute(\n            0, 2, 1)  # align temporal dimension\n        # TimesNet\n        for i in range(self.layer):\n            enc_out = self.layer_norm(self.model[i](enc_out))\n        # porject back\n        dec_out = self.projection(enc_out)\n\n        # De-Normalization from Non-stationary Transformer\n        dec_out = dec_out * \\\n                  (stdev[:, 0, :].unsqueeze(1).repeat(\n                      1, self.pred_len + self.seq_len, 1))\n        dec_out = dec_out + \\\n                  (means[:, 0, :].unsqueeze(1).repeat(\n                      1, self.pred_len + self.seq_len, 1))\n        return dec_out\n\n    def imputation(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask):\n        # Normalization from Non-stationary Transformer\n        means = torch.sum(x_enc, dim=1) / torch.sum(mask == 1, dim=1)\n        means = means.unsqueeze(1).detach()\n        x_enc = x_enc - means\n        x_enc = x_enc.masked_fill(mask == 0, 0)\n        stdev = torch.sqrt(torch.sum(x_enc * x_enc, dim=1) /\n                           torch.sum(mask == 1, dim=1) + 1e-5)\n        stdev = stdev.unsqueeze(1).detach()\n        x_enc /= stdev\n\n        # embedding\n        enc_out = self.enc_embedding(x_enc, x_mark_enc)  # [B,T,C]\n        # TimesNet\n        for i in range(self.layer):\n            enc_out = self.layer_norm(self.model[i](enc_out))\n        # porject back\n        dec_out = self.projection(enc_out)\n\n        # De-Normalization from Non-stationary Transformer\n        dec_out = dec_out * \\\n                  (stdev[:, 0, :].unsqueeze(1).repeat(\n                      1, self.pred_len + self.seq_len, 1))\n        dec_out = dec_out + \\\n                  (means[:, 0, :].unsqueeze(1).repeat(\n                      1, self.pred_len + self.seq_len, 1))\n        return dec_out\n\n    def 
anomaly_detection(self, x_enc):\n        # Normalization from Non-stationary Transformer\n        means = x_enc.mean(1, keepdim=True).detach()\n        x_enc = x_enc - means\n        stdev = torch.sqrt(\n            torch.var(x_enc, dim=1, keepdim=True, unbiased=False) + 1e-5)\n        x_enc /= stdev\n\n        # embedding\n        enc_out = self.enc_embedding(x_enc, None)  # [B,T,C]\n        # TimesNet\n        for i in range(self.layer):\n            enc_out = self.layer_norm(self.model[i](enc_out))\n        # porject back\n        dec_out = self.projection(enc_out)\n\n        # De-Normalization from Non-stationary Transformer\n        dec_out = dec_out * \\\n                  (stdev[:, 0, :].unsqueeze(1).repeat(\n                      1, self.pred_len + self.seq_len, 1))\n        dec_out = dec_out + \\\n                  (means[:, 0, :].unsqueeze(1).repeat(\n                      1, self.pred_len + self.seq_len, 1))\n        return dec_out\n\n    def classification(self, x_enc, x_mark_enc):\n        # embedding\n        enc_out = self.enc_embedding(x_enc, None)  # [B,T,C]\n        # TimesNet\n        for i in range(self.layer):\n            enc_out = self.layer_norm(self.model[i](enc_out))\n\n        # Output\n        # the output transformer encoder/decoder embeddings don't include non-linearity\n        output = self.act(enc_out)\n        output = self.dropout(output)\n        # zero-out padding embeddings\n        output = output * x_mark_enc.unsqueeze(-1)\n        # (batch_size, seq_length * d_model)\n        output = output.reshape(output.shape[0], -1)\n        output = self.projection(output)  # (batch_size, num_classes)\n        return output\n\n    def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask=None):\n        if self.task_name == 'long_term_forecast' or self.task_name == 'short_term_forecast':\n            dec_out = self.forecast(x_enc, x_mark_enc, x_dec, x_mark_dec)\n            return dec_out[:, -self.pred_len:, :]  # [B, L, D]\n        
if self.task_name == 'imputation':\n            dec_out = self.imputation(\n                x_enc, x_mark_enc, x_dec, x_mark_dec, mask)\n            return dec_out  # [B, L, D]\n        if self.task_name == 'anomaly_detection':\n            dec_out = self.anomaly_detection(x_enc)\n            return dec_out  # [B, L, D]\n        if self.task_name == 'classification':\n            dec_out = self.classification(x_enc, x_mark_enc)\n            return dec_out  # [B, N]\n        return None\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/models/__init__.py",
    "content": "from .encoder import TSEncoder\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/models/dilated_conv.py",
    "content": "import torch\nfrom torch import nn\nimport torch.nn.functional as F\nimport numpy as np\n\nclass SamePadConv(nn.Module):\n    def __init__(self, in_channels, out_channels, kernel_size, dilation=1, groups=1):\n        super().__init__()\n        self.receptive_field = (kernel_size - 1) * dilation + 1\n        padding = self.receptive_field // 2\n        self.conv = nn.Conv1d(\n            in_channels, out_channels, kernel_size,\n            padding=padding,\n            dilation=dilation,\n            groups=groups\n        )\n        self.remove = 1 if self.receptive_field % 2 == 0 else 0\n        \n    def forward(self, x):\n        out = self.conv(x)\n        if self.remove > 0:\n            out = out[:, :, : -self.remove]\n        return out\n    \nclass ConvBlock(nn.Module):\n    def __init__(self, in_channels, out_channels, kernel_size, dilation, final=False):\n        super().__init__()\n        self.conv1 = SamePadConv(in_channels, out_channels, kernel_size, dilation=dilation)\n        self.conv2 = SamePadConv(out_channels, out_channels, kernel_size, dilation=dilation)\n        self.projector = nn.Conv1d(in_channels, out_channels, 1) if in_channels != out_channels or final else None\n    \n    def forward(self, x):\n        residual = x if self.projector is None else self.projector(x)\n        x = F.gelu(x)\n        x = self.conv1(x)\n        x = F.gelu(x)\n        x = self.conv2(x)\n        return x + residual\n\nclass DilatedConvEncoder(nn.Module):\n    def __init__(self, in_channels, channels, kernel_size):\n        super().__init__()\n        self.net = nn.Sequential(*[\n            ConvBlock(\n                channels[i-1] if i > 0 else in_channels,\n                channels[i],\n                kernel_size=kernel_size,\n                dilation=2**i,\n                final=(i == len(channels)-1)\n            )\n            for i in range(len(channels))\n        ])\n        \n    def forward(self, x):\n        return self.net(x)\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/models/donut_model.py",
    "content": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\n\nclass VariationalNet(nn.Module):\n    '''\n    Encodes the input by passing through the encoder network and returns the latent representations.\n    '''\n\n    def __init__(self, in_channel, latent_dim=100, hidden_dim=3):\n        super(VariationalNet, self).__init__()\n\n        self.latent_dim = latent_dim\n        self.hidden_dim = hidden_dim\n\n        self.encoder = nn.Sequential(\n            nn.Linear(in_channel, latent_dim),\n            nn.ReLU(),\n            nn.Linear(latent_dim, latent_dim),\n            nn.ReLU()\n            )\n\n        self.fc_mu = nn.Linear(latent_dim, hidden_dim)\n        self.fc_var = nn.Sequential(\n            nn.Linear(latent_dim, hidden_dim),\n            nn.Softplus()\n            )\n\n    def forward(self, inputs):\n        '''\n        Args: \n            inputs: [batch_size, max_length, in_channel]\n        Returns:\n            z_mu: [batch_size, max_length, hidden_dim]\n            z_log_var: [batch_size, max_length, hidden_dim]\n        '''\n        hidden_res = self.encoder(inputs) # [batch_size, max_length, latent_dim]\n        z_mu = self.fc_mu(hidden_res) # [batch_size, max_length, hidden_dim]\n        z_log_var = self.fc_var(hidden_res) + 1e-4 # [batch_size, max_length, hidden_dim]\n\n        return z_mu, z_log_var\n\n\nclass GenerativeNet(nn.Module):\n    '''\n    Maps the given latent representations through the decoder network onto the inputs space.\n    '''\n\n    def __init__(self, in_channel, latent_dim=100, hidden_dim=3):\n        super(GenerativeNet, self).__init__()\n\n        self.latent_dim = latent_dim\n        self.hidden_dim = hidden_dim\n\n        self.decoder = nn.Sequential(\n            nn.Linear(hidden_dim, latent_dim),\n            nn.ReLU(),\n            nn.Linear(latent_dim, latent_dim),\n            nn.ReLU()\n            )\n\n        self.fc_mu = nn.Linear(latent_dim, in_channel)\n        
self.fc_var = nn.Sequential(\n            nn.Linear(latent_dim, in_channel),\n            nn.Softplus()\n            )\n\n    def forward(self, z):\n        '''\n        Args: \n            z: [batch_size, max_length, hidden_dim]\n        Returns:\n            x_mu: [batch_size, max_length, in_channel]\n            x_log_var: [batch_size, max_length, in_channel]\n        '''\n        hidden_res = self.decoder(z) # [batch_size, max_length, latent_dim]\n        x_mu = self.fc_mu(hidden_res) # [batch_size, max_length, in_channel]\n        x_log_var = self.fc_var(hidden_res) + 1e-4 # [batch_size, max_length, in_channel]\n\n        return x_mu, x_log_var\n\n\nclass DONUT_Model(nn.Module):\n\n    def __init__(self, in_channel, latent_dim=100, hidden_dim=3):\n        super(DONUT_Model, self).__init__()\n\n        self.in_channel = in_channel\n        self.latent_dim = latent_dim\n        self.hidden_dim = hidden_dim\n\n        self.Encoder = VariationalNet(self.in_channel, self.latent_dim, self.hidden_dim)\n        self.Decoder = GenerativeNet(self.in_channel, self.latent_dim, self.hidden_dim)\n\n\n    def reparameterize(self, mu, logvar):\n        \"\"\"\n        Reparameterization trick to sample from N(mu, var) from\n        N(0,1).\n        :param mu: (Tensor) Mean of the latent Gaussian [batch_size, max_length, hidden_dim]\n        :param logvar: (Tensor) Standard deviation of the latent Gaussian [batch_size, max_length, hidden_dim]\n        :return: (Tensor) [batch_size, max_length, hidden_dim]\n        \"\"\"\n        std = torch.exp(0.5 * logvar)\n        eps = torch.randn_like(std)\n        return eps * std + mu\n\n    def forward(self, inputs):\n        '''\n        Args:\n            inputs: [batch_size, max_length, in_channel]\n        Returns:\n            outputs: [batch_size, max_length, in_channel]\n            z_mu, z_log_var: [batch_size, max_length, hidden_dim]\n            x_mu, x_log_var: [batch_size, max_length, in_channel]\n        '''\n        
z_mu, z_log_var = self.Encoder(inputs) \n        z = self.reparameterize(z_mu, z_log_var) # [batch_size, max_length, hidden_dim]\n\n        x_mu, x_log_var = self.Decoder(z)\n        outputs = self.reparameterize(x_mu, x_log_var) # [batch_size, max_length, in_channel]\n\n        return  outputs, z_mu, z_log_var, x_mu, x_log_var\n\n    def loss_function(self, inputs, outputs, z_mu, z_log_var, x_mu, x_log_var, z_kld_weight, x_kld_weight):\n        \"\"\"\n        Computes the VAE loss function.\n        KL(N(/mu, /sigma), N(0, 1)) = /log /frac{1}{/sigma} + /frac{/sigma^2 + /mu^2}{2} - /frac{1}{2}\n        Args:\n            inputs, outputs: [batch_size, max_length, in_channel]\n            z_mu, z_log_var: [batch_size, max_length, hidden_dim]\n            x_mu, x_log_var: [batch_size, max_length, in_channel]\n            z_kld_weight, x_kld_weight: float Value\n        \"\"\"\n        recons_loss = F.mse_loss(outputs, inputs)\n\n        _, _, hidden_dim = z_mu.size()\n        z_mu = z_mu.reshape(-1, hidden_dim)\n        z_log_var = z_log_var.reshape(-1, hidden_dim)\n        z_kld_loss = torch.mean(-0.5 * torch.sum(1 + z_log_var - z_mu ** 2 - z_log_var.exp(), dim = 1), dim = 0)\n\n        _, _, in_channel = x_mu.size()\n        x_mu = x_mu.reshape(-1, in_channel)\n        x_log_var = x_log_var.reshape(-1, in_channel)\n        x_kld_loss = torch.mean(-0.5 * torch.sum(1 + x_log_var - x_mu ** 2 - x_log_var.exp(), dim = 1), dim = 0)\n\n        loss = recons_loss + z_kld_weight * z_kld_loss + x_kld_weight * x_kld_loss\n\n        return loss"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/models/encoder.py",
    "content": "import torch\nfrom torch import nn\nimport torch.nn.functional as F\nimport numpy as np\nfrom .dilated_conv import DilatedConvEncoder\n\ndef generate_continuous_mask(B, T, n=5, l=0.1):\n    res = torch.full((B, T), True, dtype=torch.bool)\n    if isinstance(n, float):\n        n = int(n * T)\n    n = max(min(n, T // 2), 1)\n    \n    if isinstance(l, float):\n        l = int(l * T)\n    l = max(l, 1)\n    \n    for i in range(B):\n        for _ in range(n):\n            t = np.random.randint(T-l+1)\n            res[i, t:t+l] = False\n    return res\n\ndef generate_binomial_mask(B, T, p=0.5):\n    return torch.from_numpy(np.random.binomial(1, p, size=(B, T))).to(torch.bool)\n\nclass TSEncoder(nn.Module):\n    def __init__(self, input_dims, output_dims, hidden_dims=64, depth=10, mask_mode='binomial'):\n        super().__init__()\n        self.input_dims = input_dims\n        self.output_dims = output_dims\n        self.hidden_dims = hidden_dims\n        self.mask_mode = mask_mode\n        self.input_fc = nn.Linear(input_dims, hidden_dims)\n        self.feature_extractor = DilatedConvEncoder(\n            hidden_dims,\n            [hidden_dims] * depth + [output_dims],\n            kernel_size=3\n        )\n        self.repr_dropout = nn.Dropout(p=0.1)\n        \n    def forward(self, x, mask=None):  # x: B x T x input_dims\n        nan_mask = ~x.isnan().any(axis=-1)\n        x[~nan_mask] = 0\n        x = self.input_fc(x)  # B x T x Ch\n        \n        # generate & apply mask\n        if mask is None:\n            if self.training:\n                mask = self.mask_mode\n            else:\n                mask = 'all_true'\n        \n        if mask == 'binomial':\n            mask = generate_binomial_mask(x.size(0), x.size(1)).to(x.device)\n        elif mask == 'continuous':\n            mask = generate_continuous_mask(x.size(0), x.size(1)).to(x.device)\n        elif mask == 'all_true':\n            mask = x.new_full((x.size(0), x.size(1)), True, 
dtype=torch.bool)\n        elif mask == 'all_false':\n            mask = x.new_full((x.size(0), x.size(1)), False, dtype=torch.bool)\n        elif mask == 'mask_last':\n            mask = x.new_full((x.size(0), x.size(1)), True, dtype=torch.bool)\n            mask[:, -1] = False\n        \n        mask &= nan_mask\n        x[~mask] = 0\n        \n        # conv encoder\n        x = x.transpose(1, 2)  # B x Ch x T\n        x = self.repr_dropout(self.feature_extractor(x))  # B x Co x T\n        x = x.transpose(1, 2)  # B x T x Co\n        \n        return x\n        "
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/models/losses.py",
    "content": "import torch\nfrom torch import nn\nimport torch.nn.functional as F\n\ndef hierarchical_contrastive_loss(z1, z2, alpha=0.5, temporal_unit=0):\n    loss = torch.tensor(0., device=z1.device)\n    d = 0\n    while z1.size(1) > 1:\n        if alpha != 0:\n            loss += alpha * instance_contrastive_loss(z1, z2)\n        if d >= temporal_unit:\n            if 1 - alpha != 0:\n                loss += (1 - alpha) * temporal_contrastive_loss(z1, z2)\n        d += 1\n        z1 = F.max_pool1d(z1.transpose(1, 2), kernel_size=2).transpose(1, 2)\n        z2 = F.max_pool1d(z2.transpose(1, 2), kernel_size=2).transpose(1, 2)\n    if z1.size(1) == 1:\n        if alpha != 0:\n            loss += alpha * instance_contrastive_loss(z1, z2)\n        d += 1\n    return loss / d\n\ndef instance_contrastive_loss(z1, z2):\n    B, T = z1.size(0), z1.size(1)\n    if B == 1:\n        return z1.new_tensor(0.)\n    z = torch.cat([z1, z2], dim=0)  # 2B x T x C\n    z = z.transpose(0, 1)  # T x 2B x C\n    sim = torch.matmul(z, z.transpose(1, 2))  # T x 2B x 2B\n    logits = torch.tril(sim, diagonal=-1)[:, :, :-1]    # T x 2B x (2B-1)\n    logits += torch.triu(sim, diagonal=1)[:, :, 1:]\n    logits = -F.log_softmax(logits, dim=-1)\n    \n    i = torch.arange(B, device=z1.device)\n    loss = (logits[:, i, B + i - 1].mean() + logits[:, B + i, i].mean()) / 2\n    return loss\n\ndef temporal_contrastive_loss(z1, z2):\n    B, T = z1.size(0), z1.size(1)\n    if T == 1:\n        return z1.new_tensor(0.)\n    z = torch.cat([z1, z2], dim=1)  # B x 2T x C\n    sim = torch.matmul(z, z.transpose(1, 2))  # B x 2T x 2T\n    logits = torch.tril(sim, diagonal=-1)[:, :, :-1]    # B x 2T x (2T-1)\n    logits += torch.triu(sim, diagonal=1)[:, :, 1:]\n    logits = -F.log_softmax(logits, dim=-1)\n    \n    t = torch.arange(T, device=z1.device)\n    loss = (logits[:, t, T + t - 1].mean() + logits[:, T + t, t].mean()) / 2\n    return loss\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/models/lstm_vae_model.py",
    "content": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\n\nclass LSTM_Encoder(nn.Module):\n    '''\n    Encodes the input by passing through the encoder network and returns the latent representations.\n    '''\n\n    def __init__(self, device, in_channel, hidden_size=16, hidden_dim=3):\n        super(LSTM_Encoder, self).__init__()\n\n        self.device = device\n        self.hidden_size = hidden_size\n        self.hidden_dim = hidden_dim\n\n        self.encoder = nn.LSTM(input_size=in_channel, hidden_size=hidden_size, batch_first=True, bidirectional=True)\n\n        self.fc_mu = nn.Linear(2*hidden_size, hidden_dim)\n        self.fc_var = nn.Sequential(\n            nn.Linear(2*hidden_size, hidden_dim),\n            nn.Softplus()\n            )\n\n    def forward(self, inputs):\n        '''\n        Args: \n            inputs: [batch_size, max_length, in_channel]\n        Returns:\n            z_mu: [batch_size, max_length, hidden_dim]\n            z_log_var: [batch_size, max_length, hidden_dim]\n        '''\n        batch_size, _, _ = inputs.size()\n        h_0 = torch.zeros((2, batch_size, self.hidden_size), requires_grad=True).to(self.device)\n        c_0 = torch.zeros((2, batch_size, self.hidden_size), requires_grad=True).to(self.device)\n\n        # hidden_res: [batch_size, max_length, 2*hidden_size]\n        hidden_res, (h_n, c_n) = self.encoder(inputs, (h_0, c_0))\n        z_mu = self.fc_mu(hidden_res) # [batch_size, max_length, hidden_dim]\n        z_log_var = self.fc_var(hidden_res) + 1e-4 # [batch_size, max_length, hidden_dim]\n\n        return z_mu, z_log_var\n\n\nclass LSTM_Decoder(nn.Module):\n    '''\n    Maps the given latent representations through the decoder network onto the inputs space.\n    '''\n\n    def __init__(self, device, in_channel, hidden_size=16, hidden_dim=3):\n        super(LSTM_Decoder, self).__init__()\n\n        self.device = device\n        self.hidden_size = hidden_size\n        self.hidden_dim 
= hidden_dim\n\n        self.decoder = nn.LSTM(input_size=hidden_dim, hidden_size=hidden_size, batch_first=True, bidirectional=True)\n\n        self.fc_mu = nn.Linear(2*hidden_size, in_channel)\n        self.fc_var = nn.Sequential(\n            nn.Linear(2*hidden_size, in_channel),\n            nn.Softplus()\n            )\n\n    def forward(self, z):\n        '''\n        Args: \n            z: [batch_size, max_length, hidden_dim]\n        Returns:\n            x_mu: [batch_size, max_length, in_channel]\n            x_log_var: [batch_size, max_length, in_channel]\n        '''\n        batch_size, _, _ = z.size()\n        h_0 = torch.zeros((2, batch_size, self.hidden_size), requires_grad=True).to(self.device)\n        c_0 = torch.zeros((2, batch_size, self.hidden_size), requires_grad=True).to(self.device)\n\n        # hidden_res: [batch_size, max_length, 2*hidden_size]\n        hidden_res, (h_n, c_n) = self.decoder(z, (h_0, c_0))\n        x_mu = self.fc_mu(hidden_res) # [batch_size, max_length, in_channel]\n        x_log_var = self.fc_var(hidden_res) + 1e-4 # [batch_size, max_length, in_channel]\n\n        return x_mu, x_log_var\n\n\nclass LSTM_VAE_Model(nn.Module):\n\n    def __init__(self, device, in_channel, hidden_size=16, hidden_dim=3):\n        super(LSTM_VAE_Model, self).__init__()\n\n        self.device = device\n        self.in_channel = in_channel\n        self.hidden_size = hidden_size\n        self.hidden_dim = hidden_dim\n\n        self.Encoder = LSTM_Encoder(self.device, self.in_channel, self.hidden_size, self.hidden_dim)\n        self.Decoder = LSTM_Decoder(self.device, self.in_channel, self.hidden_size, self.hidden_dim)\n\n\n    def reparameterize(self, mu, logvar):\n        \"\"\"\n        Reparameterization trick to sample from N(mu, var) from\n        N(0,1).\n        :param mu: (Tensor) Mean of the latent Gaussian [batch_size, max_length, hidden_dim]\n        :param logvar: (Tensor) Standard deviation of the latent Gaussian [batch_size, 
max_length, hidden_dim]\n        :return: (Tensor) [batch_size, max_length, hidden_dim]\n        \"\"\"\n        std = torch.exp(0.5 * logvar)\n        eps = torch.randn_like(std)\n        return eps * std + mu\n\n    def forward(self, inputs):\n        '''\n        Args:\n            inputs: [batch_size, max_length, in_channel]\n        Returns:\n            outputs: [batch_size, max_length, in_channel]\n            z_mu, z_log_var: [batch_size, max_length, hidden_dim]\n            x_mu, x_log_var: [batch_size, max_length, in_channel]\n        '''\n        z_mu, z_log_var = self.Encoder(inputs) \n        z = self.reparameterize(z_mu, z_log_var) # [batch_size, max_length, hidden_dim]\n\n        x_mu, x_log_var = self.Decoder(z)\n        outputs = self.reparameterize(x_mu, x_log_var) # [batch_size, max_length, in_channel]\n\n        return  outputs, z_mu, z_log_var, x_mu, x_log_var\n\n    def loss_function(self, inputs, outputs, z_mu, z_log_var, x_mu, x_log_var, z_kld_weight, x_kld_weight):\n        \"\"\"\n        Computes the VAE loss function.\n        KL(N(/mu, /sigma), N(0, 1)) = /log /frac{1}{/sigma} + /frac{/sigma^2 + /mu^2}{2} - /frac{1}{2}\n        Args:\n            inputs, outputs: [batch_size, max_length, in_channel]\n            z_mu, z_log_var: [batch_size, max_length, hidden_dim]\n            x_mu, x_log_var: [batch_size, max_length, in_channel]\n            z_kld_weight, x_kld_weight: float Value\n        \"\"\"\n        recons_loss = F.mse_loss(outputs, inputs)\n\n        _, _, hidden_dim = z_mu.size()\n        z_mu = z_mu.reshape(-1, hidden_dim)\n        z_log_var = z_log_var.reshape(-1, hidden_dim)\n        z_kld_loss = torch.mean(-0.5 * torch.sum(1 + z_log_var - z_mu ** 2 - z_log_var.exp(), dim = 1), dim = 0)\n\n        _, _, in_channel = x_mu.size()\n        x_mu = x_mu.reshape(-1, in_channel)\n        x_log_var = x_log_var.reshape(-1, in_channel)\n        x_kld_loss = torch.mean(-0.5 * torch.sum(1 + x_log_var - x_mu ** 2 - x_log_var.exp(), dim 
= 1), dim = 0)\n\n        loss = recons_loss + z_kld_weight * z_kld_loss + x_kld_weight * x_kld_loss\n\n        return loss"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/new_dataset_read_test.py",
    "content": "from datasets.data_loader import get_loader_segment\n\n\n\n\nindex = 143\ndatapath = './datasets/'\n\ndataset_name = 'MSL' ##  SMD, MSL, SMAP, PSM, SWAT, NIPS_TS_Swan, UCR\n\ndata_path = datapath + dataset_name + '/'\nbatch_size = 128\n\ndata_loader = get_loader_segment(index, data_path, batch_size, win_size=100, step=100, mode='train', dataset=dataset_name)\ndata_loader = get_loader_segment(index, data_path, batch_size, win_size=100, step=100, mode='val', dataset=dataset_name)\ndata_loader = get_loader_segment(index, data_path, batch_size, win_size=100, step=100, mode='test', dataset=dataset_name)\nprint(\"Read Success!!!\")"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/scripts/at_zeta0.sh",
    "content": "python train_lstm_vae_multi.py --dataset PSM --save_csv_name train_lstm_vae_multi_0717.csv --gpu 0;\npython train_donut_multi.py --dataset PSM --save_csv_name train_donut_multi_0717.csv --gpu 0;\npython train_lstm_vae_multi.py --dataset SWAT --save_csv_name train_lstm_vae_multi_0717.csv --gpu 0;\npython train_donut_multi.py --dataset SWAT --save_csv_name train_donut_multi_0717.csv --gpu 0;"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/scripts/at_zeta1.sh",
    "content": "python train_at_multi.py --anormly_ratio 0.5 --dataset SMD --save_csv_name train_at_multi_0719.csv --cuda cuda:0;\npython train_at_multi.py --anormly_ratio 1 --dataset MSL --save_csv_name train_at_multi_0719.csv --cuda cuda:0;\npython train_at_multi.py --anormly_ratio 0.85 --dataset SMAP --save_csv_name train_at_multi_0719.csv --cuda cuda:0;\npython train_at_multi.py --anormly_ratio 1 --dataset PSM --save_csv_name train_at_multi_0719.csv --cuda cuda:0;\npython train_at_multi.py --anormly_ratio 1 --dataset SWAT --save_csv_name train_at_multi_0719.csv --cuda cuda:0;\npython train_at_multi.py --anormly_ratio 0.9 --dataset NIPS_TS_Swan --save_csv_name train_at_multi_0719.csv --cuda cuda:0;\npython train_at_multi.py --anormly_ratio 1 --dataset NIPS_TS_Water --save_csv_name train_at_multi_0719.csv --cuda cuda:0;\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/scripts/generator_sh.py",
    "content": "\n\nuni_datasets = ['kpi', 'yahoo']\nmulti_datasets = ['SMD', 'MSL', 'SMAP', 'PSM', 'SWAT', 'NIPS_TS_Swan', 'NIPS_TS_Water']  ##  SMD, MSL, SMAP, PSM, SWAT, NIPS_TS_Swan, UCR, NIPS_TS_Water , 'UCR'\n\n\n\n# code_main = 'main_gpt4ts_uea'   ## main_patchtst_ucr  main_gpt4ts_ucr  mian_patchtst\n\ncode_main_list = ['train_spot', 'train_dspot', 'train_lstm_vae', 'train_donut', 'train_ts2vec']\n\n\n# for dataset in uni_datasets:\n#     i = 1\n#     for code_main in code_main_list:\n#         print(\"i = \", i, \"dataset_name = \", dataset)\n#         i = i + 1\n#\n#         save_csv_name = code_main + '_0717.csv'  ##  --len_k\n#\n#         with open('/dev_data/lz/tsm_ptms_anomaly_detection/other_anomaly_baselines/scripts/uni_at.sh', 'a') as f:\n#             f.write('python ' + code_main + '.py '\n#                     '--dataset ' + dataset\n#                     +\n#                     ' --save_csv_name ' + save_csv_name + ' --gpu 0' + ';\\n')\n\n\n# for _index in range(1,251):\n#     i = 1\n#     for code_main in code_main_list:\n#         print(\"i = \", i, \"dataset_name = UCR\")\n#         i = i + 1\n#\n#         save_csv_name = code_main + '_ucr_0715.csv'  ##  --len_k\n#\n#         with open('/dev_data/lz/tsm_ptms_anomaly_detection/other_anomaly_baselines/scripts/ucr_at.sh', 'a') as f:\n#             f.write('python ' + code_main + '_multi.py '\n#                     '--dataset UCR --index ' + str(_index)\n#                     +\n#                     ' --save_csv_name ' + save_csv_name + ' --gpu 0' + ';\\n')\n\n# code_main_list = ['train_lstm_vae_multi', 'train_donut_multi', 'train_ts2vec_multi', 'train_dcdetector']\n# for dataset in multi_datasets:\n#     i = 1\n#     for code_main in code_main_list:\n#         print(\"i = \", i, \"dataset_name = \", dataset)\n#         i = i + 1\n#\n#         save_csv_name = code_main + '_0717.csv'  ##  --len_k\n#\n#         with 
open('/dev_data/lz/tsm_ptms_anomaly_detection/other_anomaly_baselines/scripts/multi_at.sh', 'a') as f:\n#             f.write('python ' + code_main + '.py '\n#                     '--dataset ' + dataset\n#                     +\n#                     ' --save_csv_name ' + save_csv_name + ' --gpu 0' + ';\\n')\n\n\n# code_main_list = ['train_timesnet', 'train_gpt4ts']\n# for dataset in multi_datasets:\n#     i = 1\n#     for code_main in code_main_list:\n#         print(\"i = \", i, \"dataset_name = \", dataset)\n#         i = i + 1\n#\n#         save_csv_name = code_main + '_0717.csv'  ##  --len_k\n#\n#         with open('/dev_data/lz/tsm_ptms_anomaly_detection/other_anomaly_baselines/scripts/multi_at.sh', 'a') as f:\n#             f.write('python ' + code_main + '.py '\n#                     '--data ' + dataset\n#                     +\n#                     ' --save_csv_name ' + save_csv_name + ' --gpu 0' + ';\\n')\n#\n\n# code_main_list = ['train_at_multi']  ## , 'train_gpt4ts'  train_timesnet  train_dcdetector  train_at_multi\n#\n# for _index in range(1,251):\n#     i = 1\n#     for code_main in code_main_list:\n#         print(\"i = \", i, \"dataset_name = UCR\")\n#         i = i + 1\n#\n#         save_csv_name = code_main + '_ucr_0719.csv'  ##  --len_k\n#\n#         with open('/dev_data/lz/tsm_ptms_anomaly_detection/other_anomaly_baselines/scripts/ucr_at_zeta0.sh', 'a') as f:\n#             f.write('python ' + code_main + '.py '\n#                     '--anormly_ratio 0.5 --dataset UCR --index ' + str(_index)\n#                     +\n#                     ' --save_csv_name ' + save_csv_name + ' --cuda cuda:0' + ';\\n')   ## anomaly_ratio  anormly_ratio  anormly_ratio\n\n\n# code_main_list = ['train_dcdetector_nui']  ## , 'train_gpt4ts'  train_timesnet  train_dcdetector  train_at_multi\n# ## train_gpt4ts_uni  train_timesnet_uni\n# for dataset in uni_datasets:\n#     i = 1\n#     for code_main in code_main_list:\n#         print(\"i = \", i, \"dataset_name = 
UCR\")\n#         i = i + 1\n#\n#         save_csv_name = code_main + '_hm_0720.csv'  ##  --len_k\n#\n#         with open('/SSD/lz/tsm_ptms_anomaly_detection/other_anomaly_baselines/scripts/ucr_at.sh', 'a') as f:\n#             f.write('python ' + code_main + '.py '\n#                     '--anormly_ratio 1 --dataset ' + dataset\n#                     +\n#                     ' --save_csv_name ' + save_csv_name + ' --gpu 0' + ';\\n')   ## anomaly_ratio  anormly_ratio  anormly_ratio\n\n\ncode_main_list = ['train_gpt4ts']  ## , 'train_gpt4ts'  train_timesnet  train_dcdetector  train_at_multi\n## train_gpt4ts_uni  train_timesnet_uni\nuni_datasets =  [79, 108, 187, 203]\nfor dataset in uni_datasets:\n    i = 1\n    for code_main in code_main_list:\n        print(\"i = \", i, \"dataset_name = UCR\")\n        i = i + 1\n\n        # save_csv_name = code_main + '_hm_0720.csv'  ##  --len_k\n\n        with open('/dev_data/lz/tsm_ptms_anomaly_detection/other_anomaly_baselines/scripts/ucr_at.sh', 'a') as f:\n            f.write('python ' + code_main + '.py '\n                    '--index ' + str(dataset)\n                    +  ';\\n')   ## anomaly_ratio  anormly_ratio  anormly_ratio\n\n\n###  --cuda cuda:0\n\n## nohup ./scripts/uni_at.sh &\n\n## nohup ./scripts/multi_at.sh &\n\n## nohup ./scripts/ucr_at.sh &\n\n## nohup ./scripts/ucr_at_delta_0.sh &\n\n## nohup ./scripts/ucr_at_delta_1.sh &\n\n## nohup ./scripts/ucr_at_delta_1_2.sh &\n\n\n## nohup ./scripts/ucr_at_zeta0.sh &\n\n## nohup ./scripts/at_zeta1.sh &\n## nohup ./scripts/at_zeta0.sh &\n\n## nohup ./scripts/kpi.sh &\n## nohup ./scripts/yahoo.sh &"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/scripts/kpi.sh",
    "content": "python train_at_uni.py --dataset kpi --batch_size 8 --save_csv_name train_at_uni_0720_.csv --cuda cuda:0;\npython train_at_uni.py --dataset yahoo --batch_size 8 --save_csv_name train_at_uni_0720_.csv --cuda cuda:0;"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/scripts/multi_at.sh",
    "content": "python train_lstm_vae_multi.py --dataset SMD --save_csv_name train_lstm_vae_multi_0717.csv --gpu 1;\npython train_donut_multi.py --dataset SMD --save_csv_name train_donut_multi_0717.csv --gpu 1;\npython train_ts2vec_multi.py --dataset SMD --save_csv_name train_ts2vec_multi_0717.csv --gpu 1;\npython train_dcdetector.py --dataset SMD --save_csv_name train_dcdetector_0717.csv --gpu 1;\npython train_lstm_vae_multi.py --dataset MSL --save_csv_name train_lstm_vae_multi_0717.csv --gpu 1;\npython train_donut_multi.py --dataset MSL --save_csv_name train_donut_multi_0717.csv --gpu 1;\npython train_ts2vec_multi.py --dataset MSL --save_csv_name train_ts2vec_multi_0717.csv --gpu 1;\npython train_dcdetector.py --dataset MSL --save_csv_name train_dcdetector_0717.csv --gpu 1;\npython train_lstm_vae_multi.py --dataset SMAP --save_csv_name train_lstm_vae_multi_0717.csv --gpu 1;\npython train_donut_multi.py --dataset SMAP --save_csv_name train_donut_multi_0717.csv --gpu 1;\npython train_ts2vec_multi.py --dataset SMAP --save_csv_name train_ts2vec_multi_0717.csv --gpu 1;\npython train_dcdetector.py --dataset SMAP --save_csv_name train_dcdetector_0717.csv --gpu 1;\npython train_ts2vec_multi.py --dataset PSM --save_csv_name train_ts2vec_multi_0717.csv --gpu 1;\npython train_dcdetector.py --dataset PSM --save_csv_name train_dcdetector_0717.csv --gpu 1;\npython train_lstm_vae_multi.py --dataset SWAT --save_csv_name train_lstm_vae_multi_0717.csv --gpu 1;\npython train_donut_multi.py --dataset SWAT --save_csv_name train_donut_multi_0717.csv --gpu 1;\npython train_ts2vec_multi.py --dataset SWAT --save_csv_name train_ts2vec_multi_0717.csv --gpu 1;\npython train_dcdetector.py --dataset SWAT --save_csv_name train_dcdetector_0717.csv --gpu 1;\npython train_lstm_vae_multi.py --dataset NIPS_TS_Swan --save_csv_name train_lstm_vae_multi_0717.csv --gpu 1;\npython train_donut_multi.py --dataset NIPS_TS_Swan --save_csv_name train_donut_multi_0717.csv --gpu 1;\npython 
train_ts2vec_multi.py --dataset NIPS_TS_Swan --save_csv_name train_ts2vec_multi_0717.csv --gpu 1;\npython train_dcdetector.py --dataset NIPS_TS_Swan --save_csv_name train_dcdetector_0717.csv --gpu 1;\npython train_lstm_vae_multi.py --dataset NIPS_TS_Water --save_csv_name train_lstm_vae_multi_0717.csv --gpu 1;\npython train_donut_multi.py --dataset NIPS_TS_Water --save_csv_name train_donut_multi_0717.csv --gpu 1;\npython train_ts2vec_multi.py --dataset NIPS_TS_Water --save_csv_name train_ts2vec_multi_0717.csv --gpu 1;\npython train_dcdetector.py --dataset NIPS_TS_Water --save_csv_name train_dcdetector_0717.csv --gpu 1;\npython train_timesnet.py --data SMD --save_csv_name train_timesnet_0717.csv --gpu 1;\npython train_gpt4ts.py --data SMD --save_csv_name train_gpt4ts_0717.csv --gpu 1;\npython train_timesnet.py --data MSL --save_csv_name train_timesnet_0717.csv --gpu 1;\npython train_gpt4ts.py --data MSL --save_csv_name train_gpt4ts_0717.csv --gpu 1;\npython train_timesnet.py --data SMAP --save_csv_name train_timesnet_0717.csv --gpu 1;\npython train_gpt4ts.py --data SMAP --save_csv_name train_gpt4ts_0717.csv --gpu 1;\npython train_timesnet.py --data PSM --save_csv_name train_timesnet_0717.csv --gpu 1;\npython train_gpt4ts.py --data PSM --save_csv_name train_gpt4ts_0717.csv --gpu 1;\npython train_timesnet.py --data SWAT --save_csv_name train_timesnet_0717.csv --gpu 1;\npython train_gpt4ts.py --data SWAT --save_csv_name train_gpt4ts_0717.csv --gpu 1;\npython train_timesnet.py --data NIPS_TS_Swan --save_csv_name train_timesnet_0717.csv --gpu 1;\npython train_gpt4ts.py --data NIPS_TS_Swan --save_csv_name train_gpt4ts_0717.csv --gpu 1;\npython train_timesnet.py --data NIPS_TS_Water --save_csv_name train_timesnet_0717.csv --gpu 1;\npython train_gpt4ts.py --data NIPS_TS_Water --save_csv_name train_gpt4ts_0717.csv --gpu 1;\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/scripts/ucr_at.sh",
    "content": "python train_dcdetector.py --index 38;\npython train_dcdetector.py --index 54;\npython train_dcdetector.py --index 71;\npython train_dcdetector.py --index 72;\npython train_dcdetector.py --index 79;\npython train_dcdetector.py --index 85;\npython train_dcdetector.py --index 88;\npython train_dcdetector.py --index 108;\npython train_dcdetector.py --index 146;\npython train_dcdetector.py --index 162;\npython train_dcdetector.py --index 179;\npython train_dcdetector.py --index 180;\npython train_dcdetector.py --index 187;\npython train_dcdetector.py --index 193;\npython train_dcdetector.py --index 196;\npython train_dcdetector.py --index 203;\npython train_dcdetector.py --index 212;\npython train_dcdetector.py --index 229;\npython train_dcdetector.py --index 232;\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/scripts/ucr_at_delta_0.sh",
    "content": "python train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 35 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 36 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 37 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 38 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 39 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 40 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 41 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 42 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 43 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 44 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 45 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 46 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 47 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 48 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 49 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 50 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 
--data UCR --index 51 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 52 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 53 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 54 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 55 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 56 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 57 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 58 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 59 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 60 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 61 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 62 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 63 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 64 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 65 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 66 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 67 --save_csv_name 
train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 68 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 69 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 70 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 71 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 72 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 73 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 74 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 75 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 76 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 77 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 78 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 79 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 80 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 81 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 82 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 83 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py 
--anomaly_ratio 0.5 --data UCR --index 84 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 85 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 86 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 87 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 88 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 89 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 90 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 91 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 92 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 93 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 94 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 95 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 96 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 97 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 98 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 99 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 100 --save_csv_name 
train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 101 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 102 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 103 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 104 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 105 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 106 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 107 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 108 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 109 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 110 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 111 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 112 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 113 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 114 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 115 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 116 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython 
train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 117 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 118 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 119 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 120 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 121 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 122 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 123 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 124 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 125 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 126 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 127 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 128 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 129 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 130 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 131 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 132 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data 
UCR --index 133 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 134 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 135 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 136 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 137 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 138 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 139 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 140 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 141 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 142 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 143 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 144 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 145 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 146 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 147 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 148 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 149 --save_csv_name 
train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 150 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 151 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 152 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 153 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 154 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 155 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 156 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 157 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 158 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 159 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 160 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 161 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 162 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 163 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 164 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 165 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython 
train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 166 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 167 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 168 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 169 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 170 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 171 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 172 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 173 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 174 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 175 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 176 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 177 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 178 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 179 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 180 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 181 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data 
UCR --index 182 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 183 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 184 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 185 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 186 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 187 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 188 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 189 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 190 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 191 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 192 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 193 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 194 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 195 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 196 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 197 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 198 --save_csv_name 
train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 199 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 200 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 201 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 202 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 203 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 204 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 205 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 206 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 207 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 208 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 209 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 210 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 211 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 212 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 213 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 214 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython 
train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 215 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 216 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 217 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 218 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 219 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 220 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 221 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 222 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 223 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 224 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 225 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 226 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 227 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 228 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 229 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 230 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data 
UCR --index 231 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 232 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 233 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 234 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 235 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 236 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 237 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 238 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 239 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 240 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 241 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 242 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 243 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 244 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 245 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 246 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 247 --save_csv_name 
train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 248 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 249 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 250 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 1 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 2 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 3 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 4 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 5 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 6 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 7 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 8 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 9 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 10 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 11 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 12 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 13 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 14 --save_csv_name 
train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 15 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 16 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 17 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 18 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 19 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 20 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 21 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 22 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 23 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 24 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 25 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 26 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 27 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 28 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 29 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 30 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 31 --save_csv_name 
train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 32 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 33 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 34 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 35 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 36 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 37 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 38 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 39 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 40 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 41 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 42 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 43 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 44 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 45 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 46 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 47 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 48 --save_csv_name 
train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 49 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 50 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 51 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 52 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 53 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 54 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 55 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 56 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 57 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 58 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 59 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 60 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 61 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 62 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 63 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 64 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 65 --save_csv_name 
train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 66 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 67 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 68 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 69 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 70 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 71 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 72 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 73 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 74 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 75 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 76 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 77 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 78 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 79 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 80 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 81 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 82 --save_csv_name 
train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 83 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 84 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 85 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 86 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 87 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 88 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 89 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 90 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 91 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 92 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 93 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 94 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 95 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 96 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 97 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 98 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 99 --save_csv_name 
train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 100 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 101 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 102 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 103 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 104 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 105 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 106 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 107 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 108 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 109 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 110 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 111 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 112 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 113 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 114 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 115 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 116 
--save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 117 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 118 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 119 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 120 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 121 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 122 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 123 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 124 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 125 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 126 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 127 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 128 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 129 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 130 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 131 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 132 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR 
--index 133 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 134 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 135 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 136 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 137 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 138 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 139 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 140 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 141 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 142 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 143 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 144 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 145 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 146 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 147 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 148 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 149 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 
--data UCR --index 150 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 151 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 152 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 153 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 154 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 155 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 156 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 157 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 158 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 159 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 160 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 161 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 162 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 163 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 164 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 165 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 166 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py 
--anomaly_ratio 0.5 --data UCR --index 167 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 168 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 169 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 170 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 171 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 172 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 173 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 174 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 175 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 176 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 177 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 178 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 179 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 180 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 181 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 182 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 183 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython 
train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 184 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 185 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 186 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 187 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 188 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 189 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 190 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 191 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 192 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 193 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 194 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 195 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 196 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 197 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 198 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 199 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 200 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 
1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 201 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 202 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 203 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 204 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 205 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 206 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 207 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 208 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 209 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 210 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 211 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 212 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 213 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 214 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 215 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 216 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 217 --save_csv_name train_gpt4ts_ucr_0717.csv 
--gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 218 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 219 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 220 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 221 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 222 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 223 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 224 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 225 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 226 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 227 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 228 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 229 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 230 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 231 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 232 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 233 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 234 --save_csv_name 
train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 235 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 236 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 237 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 238 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 239 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 240 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 241 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 242 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 243 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 244 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 245 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 246 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 247 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 248 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 249 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 250 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\n\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/scripts/ucr_at_delta_1.sh",
    "content": "python train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 1 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 2 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 3 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 4 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 5 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 6 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 7 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 8 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 9 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 10 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 11 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 12 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 13 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 14 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 15 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 16 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 17 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 
1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 18 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 19 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 20 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 21 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 22 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 23 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 24 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 25 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 26 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 27 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 28 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 29 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 30 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 31 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 32 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 33 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 34 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython 
train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 35 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 36 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 37 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 38 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 39 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 40 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 41 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 42 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 43 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 44 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 45 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 46 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 47 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 48 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 49 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 50 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 51 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython 
train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 52 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 53 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 54 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 55 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 56 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 57 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 58 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 59 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 60 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 61 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 62 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 63 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 64 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 65 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 66 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 67 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 68 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython 
train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 69 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 70 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 71 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 72 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 73 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 74 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 75 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 76 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 77 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 78 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 79 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 80 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 81 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 82 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 83 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 84 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 85 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython 
train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 86 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 87 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 88 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 89 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 90 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 91 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 92 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 93 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 94 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 95 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 96 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 97 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 98 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 99 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 100 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 101 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 102 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython 
train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 103 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 104 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 105 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 106 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 107 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 108 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 109 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 110 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 111 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 112 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 113 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 114 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 115 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 116 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 117 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 118 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 119 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 
1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 120 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 121 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 122 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 123 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 124 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 125 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 126 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 127 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 128 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 129 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 130 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 131 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 132 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 133 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 134 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 135 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 136 --save_csv_name train_gpt4ts_ucr_0717.csv 
--gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 137 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 138 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 139 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 140 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 141 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 142 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 143 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 144 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 145 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 146 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 147 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 148 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 149 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 150 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 151 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 152 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 153 --save_csv_name 
train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 154 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 155 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 156 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 157 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 158 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 159 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 160 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 161 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 162 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 163 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 164 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 165 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 166 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 167 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 168 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 169 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 170 
--save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 171 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 172 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 173 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 174 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 175 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 176 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 177 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 178 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 179 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 180 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 181 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 182 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 183 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 184 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 185 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 186 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR 
--index 187 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 188 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 189 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 190 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 191 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 192 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 193 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 194 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 195 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 196 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 197 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 198 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 199 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 200 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 201 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 202 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 203 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 
--data UCR --index 204 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 205 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 206 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 207 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 208 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 209 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 210 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 211 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 212 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 213 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 214 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 215 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 216 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 217 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 218 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 219 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 220 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py 
--anomaly_ratio 0.5 --data UCR --index 221 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 222 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 223 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 224 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 225 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 226 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 227 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 228 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 229 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 230 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 231 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 232 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 233 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 234 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 235 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 236 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 237 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython 
train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 238 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 239 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 240 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 241 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 242 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 243 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 244 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 245 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 246 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 247 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 248 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 249 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npython train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 250 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/scripts/ucr_at_delta_1_2.sh",
    "content": "python train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 35 --save_csv_name train_timesnet_ucr_0719.csv --gpu 1;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 36 --save_csv_name train_timesnet_ucr_0719.csv --gpu 1;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 37 --save_csv_name train_timesnet_ucr_0719.csv --gpu 1;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 38 --save_csv_name train_timesnet_ucr_0719.csv --gpu 1;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 39 --save_csv_name train_timesnet_ucr_0719.csv --gpu 1;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 40 --save_csv_name train_timesnet_ucr_0719.csv --gpu 1;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 41 --save_csv_name train_timesnet_ucr_0719.csv --gpu 1;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 42 --save_csv_name train_timesnet_ucr_0719.csv --gpu 1;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 43 --save_csv_name train_timesnet_ucr_0719.csv --gpu 1;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 44 --save_csv_name train_timesnet_ucr_0719.csv --gpu 1;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 45 --save_csv_name train_timesnet_ucr_0719.csv --gpu 1;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 46 --save_csv_name train_timesnet_ucr_0719.csv --gpu 1;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 47 --save_csv_name train_timesnet_ucr_0719.csv --gpu 1;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 48 --save_csv_name train_timesnet_ucr_0719.csv --gpu 1;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 49 --save_csv_name train_timesnet_ucr_0719.csv --gpu 1;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 50 --save_csv_name train_timesnet_ucr_0719.csv --gpu 1;\npython train_timesnet.py --anomaly_ratio 0.5 
--data UCR --index 51 --save_csv_name train_timesnet_ucr_0719.csv --gpu 1;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 52 --save_csv_name train_timesnet_ucr_0719.csv --gpu 1;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 53 --save_csv_name train_timesnet_ucr_0719.csv --gpu 1;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 54 --save_csv_name train_timesnet_ucr_0719.csv --gpu 1;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 55 --save_csv_name train_timesnet_ucr_0719.csv --gpu 1;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 56 --save_csv_name train_timesnet_ucr_0719.csv --gpu 1;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 57 --save_csv_name train_timesnet_ucr_0719.csv --gpu 1;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 58 --save_csv_name train_timesnet_ucr_0719.csv --gpu 1;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 59 --save_csv_name train_timesnet_ucr_0719.csv --gpu 1;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 60 --save_csv_name train_timesnet_ucr_0719.csv --gpu 1;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 61 --save_csv_name train_timesnet_ucr_0719.csv --gpu 1;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 62 --save_csv_name train_timesnet_ucr_0719.csv --gpu 1;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 63 --save_csv_name train_timesnet_ucr_0719.csv --gpu 1;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 64 --save_csv_name train_timesnet_ucr_0719.csv --gpu 1;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 65 --save_csv_name train_timesnet_ucr_0719.csv --gpu 1;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 66 --save_csv_name train_timesnet_ucr_0719.csv --gpu 1;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 67 --save_csv_name 
train_timesnet_ucr_0719.csv --gpu 1;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 68 --save_csv_name train_timesnet_ucr_0719.csv --gpu 1;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 69 --save_csv_name train_timesnet_ucr_0719.csv --gpu 1;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 70 --save_csv_name train_timesnet_ucr_0719.csv --gpu 1;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 71 --save_csv_name train_timesnet_ucr_0719.csv --gpu 1;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 72 --save_csv_name train_timesnet_ucr_0719.csv --gpu 1;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 73 --save_csv_name train_timesnet_ucr_0719.csv --gpu 1;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 74 --save_csv_name train_timesnet_ucr_0719.csv --gpu 1;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 75 --save_csv_name train_timesnet_ucr_0719.csv --gpu 1;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 76 --save_csv_name train_timesnet_ucr_0719.csv --gpu 1;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 77 --save_csv_name train_timesnet_ucr_0719.csv --gpu 1;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 78 --save_csv_name train_timesnet_ucr_0719.csv --gpu 1;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 79 --save_csv_name train_timesnet_ucr_0719.csv --gpu 1;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 80 --save_csv_name train_timesnet_ucr_0719.csv --gpu 1;\npython train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 81 --save_csv_name train_timesnet_ucr_0719.csv --gpu 1;"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/scripts/ucr_at_zeta0.sh",
    "content": "python train_at_multi.py --anormly_ratio 0.5 --dataset UCR --index 214 --save_csv_name train_at_multi_ucr_0719.csv --cuda cuda:1;\npython train_at_multi.py --anormly_ratio 0.5 --dataset UCR --index 215 --save_csv_name train_at_multi_ucr_0719.csv --cuda cuda:1;\npython train_at_multi.py --anormly_ratio 0.5 --dataset UCR --index 216 --save_csv_name train_at_multi_ucr_0719.csv --cuda cuda:1;\npython train_at_multi.py --anormly_ratio 0.5 --dataset UCR --index 217 --save_csv_name train_at_multi_ucr_0719.csv --cuda cuda:1;\npython train_at_multi.py --anormly_ratio 0.5 --dataset UCR --index 218 --save_csv_name train_at_multi_ucr_0719.csv --cuda cuda:1;\npython train_at_multi.py --anormly_ratio 0.5 --dataset UCR --index 219 --save_csv_name train_at_multi_ucr_0719.csv --cuda cuda:1;\npython train_at_multi.py --anormly_ratio 0.5 --dataset UCR --index 220 --save_csv_name train_at_multi_ucr_0719.csv --cuda cuda:1;\npython train_at_multi.py --anormly_ratio 0.5 --dataset UCR --index 221 --save_csv_name train_at_multi_ucr_0719.csv --cuda cuda:1;\npython train_at_multi.py --anormly_ratio 0.5 --dataset UCR --index 222 --save_csv_name train_at_multi_ucr_0719.csv --cuda cuda:1;\npython train_at_multi.py --anormly_ratio 0.5 --dataset UCR --index 223 --save_csv_name train_at_multi_ucr_0719.csv --cuda cuda:1;\npython train_at_multi.py --anormly_ratio 0.5 --dataset UCR --index 224 --save_csv_name train_at_multi_ucr_0719.csv --cuda cuda:1;\npython train_at_multi.py --anormly_ratio 0.5 --dataset UCR --index 225 --save_csv_name train_at_multi_ucr_0719.csv --cuda cuda:1;\npython train_at_multi.py --anormly_ratio 0.5 --dataset UCR --index 226 --save_csv_name train_at_multi_ucr_0719.csv --cuda cuda:1;\npython train_at_multi.py --anormly_ratio 0.5 --dataset UCR --index 227 --save_csv_name train_at_multi_ucr_0719.csv --cuda cuda:1;\npython train_at_multi.py --anormly_ratio 0.5 --dataset UCR --index 228 --save_csv_name train_at_multi_ucr_0719.csv --cuda cuda:1;\npython 
train_at_multi.py --anormly_ratio 0.5 --dataset UCR --index 229 --save_csv_name train_at_multi_ucr_0719.csv --cuda cuda:1;\npython train_at_multi.py --anormly_ratio 0.5 --dataset UCR --index 230 --save_csv_name train_at_multi_ucr_0719.csv --cuda cuda:1;\npython train_at_multi.py --anormly_ratio 0.5 --dataset UCR --index 231 --save_csv_name train_at_multi_ucr_0719.csv --cuda cuda:1;\npython train_at_multi.py --anormly_ratio 0.5 --dataset UCR --index 232 --save_csv_name train_at_multi_ucr_0719.csv --cuda cuda:1;\npython train_at_multi.py --anormly_ratio 0.5 --dataset UCR --index 233 --save_csv_name train_at_multi_ucr_0719.csv --cuda cuda:1;\npython train_at_multi.py --anormly_ratio 0.5 --dataset UCR --index 234 --save_csv_name train_at_multi_ucr_0719.csv --cuda cuda:1;\npython train_at_multi.py --anormly_ratio 0.5 --dataset UCR --index 235 --save_csv_name train_at_multi_ucr_0719.csv --cuda cuda:1;\npython train_at_multi.py --anormly_ratio 0.5 --dataset UCR --index 236 --save_csv_name train_at_multi_ucr_0719.csv --cuda cuda:1;\npython train_at_multi.py --anormly_ratio 0.5 --dataset UCR --index 237 --save_csv_name train_at_multi_ucr_0719.csv --cuda cuda:1;\npython train_at_multi.py --anormly_ratio 0.5 --dataset UCR --index 238 --save_csv_name train_at_multi_ucr_0719.csv --cuda cuda:1;\npython train_at_multi.py --anormly_ratio 0.5 --dataset UCR --index 239 --save_csv_name train_at_multi_ucr_0719.csv --cuda cuda:1;\npython train_at_multi.py --anormly_ratio 0.5 --dataset UCR --index 240 --save_csv_name train_at_multi_ucr_0719.csv --cuda cuda:1;\npython train_at_multi.py --anormly_ratio 0.5 --dataset UCR --index 241 --save_csv_name train_at_multi_ucr_0719.csv --cuda cuda:1;\npython train_at_multi.py --anormly_ratio 0.5 --dataset UCR --index 242 --save_csv_name train_at_multi_ucr_0719.csv --cuda cuda:1;\npython train_at_multi.py --anormly_ratio 0.5 --dataset UCR --index 243 --save_csv_name train_at_multi_ucr_0719.csv --cuda cuda:1;\npython train_at_multi.py --anormly_ratio 
0.5 --dataset UCR --index 244 --save_csv_name train_at_multi_ucr_0719.csv --cuda cuda:1;\npython train_at_multi.py --anormly_ratio 0.5 --dataset UCR --index 245 --save_csv_name train_at_multi_ucr_0719.csv --cuda cuda:1;\npython train_at_multi.py --anormly_ratio 0.5 --dataset UCR --index 246 --save_csv_name train_at_multi_ucr_0719.csv --cuda cuda:1;\npython train_at_multi.py --anormly_ratio 0.5 --dataset UCR --index 247 --save_csv_name train_at_multi_ucr_0719.csv --cuda cuda:1;\npython train_at_multi.py --anormly_ratio 0.5 --dataset UCR --index 248 --save_csv_name train_at_multi_ucr_0719.csv --cuda cuda:1;\npython train_at_multi.py --anormly_ratio 0.5 --dataset UCR --index 249 --save_csv_name train_at_multi_ucr_0719.csv --cuda cuda:1;\npython train_at_multi.py --anormly_ratio 0.5 --dataset UCR --index 250 --save_csv_name train_at_multi_ucr_0719.csv --cuda cuda:1;\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/scripts/uni_at.sh",
    "content": "python train_gpt4ts_uni.py --anomaly_ratio 1 --data kpi --save_csv_name train_gpt4ts_uni_hm_0720.csv --gpu 1;\npython train_gpt4ts_uni.py --anomaly_ratio 1 --data yahoo --save_csv_name train_gpt4ts_uni_hm_0720.csv --gpu 0;\npython train_timesnet_uni.py --anomaly_ratio 1 --data kpi --save_csv_name train_timesnet_uni_hm_0720.csv --gpu 0;\npython train_timesnet_uni.py --anomaly_ratio 1 --data yahoo --save_csv_name train_timesnet_uni_hm_0720.csv --gpu 0;\npython train_dcdetector_nui.py --anormly_ratio 1 --dataset kpi --save_csv_name train_dcdetector_nui_hm_0720.csv --gpu 0;\npython train_dcdetector_nui.py --anormly_ratio 1 --dataset yahoo --save_csv_name train_dcdetector_nui_hm_0720.csv --gpu 1;\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/scripts/yahoo.sh",
    "content": "python train_dcdetector_nui.py --anormly_ratio 1 --dataset kpi --save_csv_name train_dcdetector_nui_hm_0720.csv --gpu 0;\npython train_dcdetector_nui.py --anormly_ratio 1 --dataset yahoo --save_csv_name train_dcdetector_nui_hm_0720.csv --gpu 0;"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/spot.py",
    "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 12 10:08:16 2016\n\n@author: Alban Siffer \n@company: Amossys\n@license: GNU GPLv3\n\"\"\"\n\nfrom scipy.optimize import minimize\nfrom math import log,floor\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport tqdm\n\n# colors for plot\ndeep_saffron = '#FF9933'\nair_force_blue = '#5D8AA8'\n\n\n\"\"\"\n================================= MAIN CLASS ==================================\n\"\"\"\n\nclass SPOT:\n    \"\"\"\n    This class allows to run SPOT algorithm on univariate dataset (upper-bound)\n    \n    Attributes\n    ----------\n    proba : float\n        Detection level (risk), chosen by the user\n        \n    extreme_quantile : float\n        current threshold (bound between normal and abnormal events)\n        \n    data : numpy.array\n        stream\n    \n    init_data : numpy.array\n        initial batch of observations (for the calibration/initialization step)\n    \n    init_threshold : float\n        initial threshold computed during the calibration step\n    \n    peaks : numpy.array\n        array of peaks (excesses above the initial threshold)\n    \n    n : int\n        number of observed values\n    \n    Nt : int\n        number of observed peaks\n    \"\"\"\n    \n    def __init__(self, q = 1e-4):\n        \"\"\"\n        Constructor\n\n\t    Parameters\n\t    ----------\n\t    q\n\t\t    Detection level (risk)\n\t\n\t    Returns\n\t    ----------\n    \tSPOT object\n        \"\"\"\n        self.proba = q\n        self.extreme_quantile = None\n        self.data = None\n        self.init_data = None\n        self.init_threshold = None\n        self.peaks = None\n        self.n = 0\n        self.Nt = 0\n        \n    def __str__(self):\n        s = ''\n        s += 'Streaming Peaks-Over-Threshold Object\\n'\n        s += 'Detection level q = %s\\n' % self.proba\n        if self.data is not None:\n            s += 'Data imported 
: Yes\\n'\n            s += '\\t initialization  : %s values\\n' % self.init_data.size\n            s += '\\t stream : %s values\\n' % self.data.size\n        else:\n            s += 'Data imported : No\\n'\n            return s\n            \n        if self.n == 0:\n            s += 'Algorithm initialized : No\\n'\n        else:\n            s += 'Algorithm initialized : Yes\\n'\n            s += '\\t initial threshold : %s\\n' % self.init_threshold\n            \n            r = self.n-self.init_data.size\n            if r > 0:\n                s += 'Algorithm run : Yes\\n'\n                s += '\\t number of observations : %s (%.2f %%)\\n' % (r,100*r/self.n)\n            else:\n                s += '\\t number of peaks  : %s\\n' % self.Nt\n                s += '\\t extreme quantile : %s\\n' % self.extreme_quantile\n                s += 'Algorithm run : No\\n'\n        return s\n    \n    \n    def fit(self,init_data,data):\n        \"\"\"\n        Import data to SPOT object\n        \n        Parameters\n\t    ----------\n\t    init_data : list, numpy.array or pandas.Series\n\t\t    initial batch to calibrate the algorithm\n            \n        data : numpy.array\n\t\t    data for the run (list, np.array or pd.series)\n\t\n        \"\"\"\n        # print(\"init_data.shape = \", init_data.shape, \", data.shape = \", data.shape)\n        if isinstance(data,list):\n            self.data = np.array(data)\n        elif isinstance(data,np.ndarray):\n            self.data = data\n        elif isinstance(data,pd.Series):\n            self.data = data.values\n        else:\n            print('This data format (%s) is not supported' % type(data))\n            return\n            \n        if isinstance(init_data,list):\n            self.init_data = np.array(init_data)\n        elif isinstance(init_data,np.ndarray):\n            self.init_data = init_data\n        elif isinstance(init_data,pd.Series):\n            self.init_data = init_data.values\n        elif 
isinstance(init_data,int):\n            self.init_data = self.data[:init_data]\n            self.data = self.data[init_data:]\n        elif isinstance(init_data,float) & (init_data<1) & (init_data>0):\n            r = int(init_data*data.size)\n            self.init_data = self.data[:r]\n            self.data = self.data[r:]\n        else:\n            print('The initial data cannot be set')\n            return\n        \n    def add(self,data):\n        \"\"\"\n        This function allows to append data to the already fitted data\n        \n        Parameters\n\t    ----------\n\t    data : list, numpy.array, pandas.Series\n\t\t    data to append\n        \"\"\"\n        if isinstance(data,list):\n            data = np.array(data)\n        elif isinstance(data,np.ndarray):\n            data = data\n        elif isinstance(data,pd.Series):\n            data = data.values\n        else:\n            print('This data format (%s) is not supported' % type(data))\n            return\n        \n        self.data = np.append(self.data,data)\n        return\n    \n    def initialize(self, level = 0.98, verbose = True):\n        \"\"\"\n        Run the calibration (initialization) step\n        \n        Parameters\n\t    ----------\n        level : float\n            (default 0.98) Probability associated with the initial threshold t \n\t    verbose : bool\n\t\t    (default = True) If True, gives details about the batch initialization\n        \"\"\"\n        level = level-floor(level)\n        \n        n_init = self.init_data.size\n        \n        S = np.sort(self.init_data)     # we sort X to get the empirical quantile\n        self.init_threshold = S[int(level*n_init)] # t is fixed for the whole algorithm\n\n        # initial peaks\n        self.peaks = self.init_data[self.init_data>self.init_threshold]-self.init_threshold \n        self.Nt = self.peaks.size\n        self.n = n_init\n        \n        if verbose:\n            print('Initial threshold : %s' % 
self.init_threshold)\n            print('Number of peaks : %s' % self.Nt)\n            print('Grimshaw maximum log-likelihood estimation ... ', end = '')\n            \n        g,s,l = self._grimshaw()\n        self.extreme_quantile = self._quantile(g,s)\n        \n        if verbose:\n            print('[done]')\n            print('\\t'+chr(0x03B3) + ' = ' + str(g))\n            print('\\t'+chr(0x03C3) + ' = ' + str(s))\n            print('\\tL = ' + str(l))\n            print('Extreme quantile (probability = %s): %s' % (self.proba,self.extreme_quantile))\n        \n        return \n    \n    \n    \n    \n    def _rootsFinder(fun,jac,bounds,npoints,method):\n        \"\"\"\n        Find possible roots of a scalar function\n        \n        Parameters\n        ----------\n        fun : function\n\t\t    scalar function \n        jac : function\n            first order derivative of the function  \n        bounds : tuple\n            (min,max) interval for the roots search    \n        npoints : int\n            maximum number of roots to output      \n        method : str\n            'regular' : regular sample of the search interval, 'random' : uniform (distribution) sample of the search interval\n        \n        Returns\n        ----------\n        numpy.array\n            possible roots of the function\n        \"\"\"\n        if method == 'regular':\n            step = (bounds[1]-bounds[0])/(npoints+1)\n            # print(\"step = \", step, \", bounds[0] = \", bounds[0], \", bounds[1] = \", bounds[1])\n            if step == 0:\n                X0 = np.random.uniform(bounds[0],bounds[1],npoints)\n            else:\n                X0 = np.arange(bounds[0]+step,bounds[1],step)\n\n            ## for the ucr 239 240 241\n            # step = (bounds[1] - bounds[0]) / (npoints+1)\n            # # print(\"step = \", step, \", bounds[0] = \", bounds[0], \", bounds[1] = \", bounds[1])\n            # X0 = np.arange(bounds[0], bounds[1], step)\n        elif method 
== 'random':\n            X0 = np.random.uniform(bounds[0],bounds[1],npoints)\n        \n        def objFun(X,f,jac):\n            g = 0\n            j = np.zeros(X.shape)\n            i = 0\n            for x in X:\n                fx = f(x)\n                g = g+fx**2\n                j[i] = 2*fx*jac(x)\n                i = i+1\n            return g,j\n        \n        opt = minimize(lambda X:objFun(X,fun,jac), X0, \n                       method='L-BFGS-B', \n                       jac=True, bounds=[bounds]*len(X0))\n        \n        X = opt.x\n        np.round(X,decimals = 5)\n        return np.unique(X)\n    \n    \n    def _log_likelihood(Y,gamma,sigma):\n        \"\"\"\n        Compute the log-likelihood for the Generalized Pareto Distribution (μ=0)\n        \n        Parameters\n        ----------\n        Y : numpy.array\n\t\t    observations\n        gamma : float\n            GPD index parameter\n        sigma : float\n            GPD scale parameter (>0)   \n\n        Returns\n        ----------\n        float\n            log-likelihood of the sample Y to be drawn from a GPD(γ,σ,μ=0)\n        \"\"\"\n        n = Y.size\n        if gamma != 0:\n            tau = gamma/sigma\n            L = -n * log(sigma) - ( 1 + (1/gamma) ) * ( np.log(1+tau*Y) ).sum()\n        else:\n            L = n * ( 1 + log(Y.mean()) )\n        return L\n\n\n    def _grimshaw(self,epsilon = 1e-8, n_points = 10):\n        \"\"\"\n        Compute the GPD parameters estimation with the Grimshaw's trick\n        \n        Parameters\n        ----------\n        epsilon : float\n\t\t    numerical parameter to perform (default : 1e-8)\n        n_points : int\n            maximum number of candidates for maximum likelihood (default : 10)\n\n        Returns\n        ----------\n        gamma_best,sigma_best,ll_best\n            gamma estimates, sigma estimates and corresponding log-likelihood\n        \"\"\"\n        def u(s):\n            return 1 + np.log(s).mean()\n            \n  
      def v(s):\n            return np.mean(1/s)\n        \n        def w(Y,t):\n            s = 1+t*Y\n            us = u(s)\n            vs = v(s)\n            return us*vs-1\n        \n        def jac_w(Y,t):\n            s = 1+t*Y\n            us = u(s)\n            vs = v(s)\n            jac_us = (1/t)*(1-vs)\n            jac_vs = (1/t)*(-vs+np.mean(1/s**2))\n            return us*jac_vs+vs*jac_us\n            \n    \n        Ym = self.peaks.min()\n        YM = self.peaks.max()\n        Ymean = self.peaks.mean()\n        \n        \n        a = -1/YM\n        if abs(a)<2*epsilon:\n            epsilon = abs(a)/n_points\n        \n        a = a + epsilon\n        b = 2*(Ymean-Ym)/(Ymean*Ym)\n        c = 2*(Ymean-Ym)/(Ym**2)\n    \n        # We look for possible roots\n        left_zeros = SPOT._rootsFinder(lambda t: w(self.peaks,t),\n                                 lambda t: jac_w(self.peaks,t),\n                                 (a+epsilon,-epsilon),\n                                 n_points,'regular')\n        \n        right_zeros = SPOT._rootsFinder(lambda t: w(self.peaks,t),\n                                  lambda t: jac_w(self.peaks,t),\n                                  (b,c),\n                                  n_points,'regular')\n    \n        # all the possible roots\n        zeros = np.concatenate((left_zeros,right_zeros))\n        \n        # 0 is always a solution so we initialize with it\n        gamma_best = 0\n        sigma_best = Ymean\n        ll_best = SPOT._log_likelihood(self.peaks,gamma_best,sigma_best)\n        \n        # we look for better candidates\n        for z in zeros:\n            gamma = u(1+z*self.peaks)-1\n            sigma = gamma/z\n            ll = SPOT._log_likelihood(self.peaks,gamma,sigma)\n            if ll>ll_best:\n                gamma_best = gamma\n                sigma_best = sigma\n                ll_best = ll\n    \n        return gamma_best,sigma_best,ll_best\n\n    \n\n    def _quantile(self,gamma,sigma):\n   
     \"\"\"\n        Compute the quantile at level 1-q\n        \n        Parameters\n        ----------\n        gamma : float\n\t\t    GPD parameter\n        sigma : float\n            GPD parameter\n\n        Returns\n        ----------\n        float\n            quantile at level 1-q for the GPD(γ,σ,μ=0)\n        \"\"\"\n        r = self.n * self.proba / self.Nt\n        if gamma != 0:\n            return self.init_threshold + (sigma/gamma)*(pow(r,-gamma)-1)\n        else:\n            return self.init_threshold - sigma*log(r)\n\n        \n    def run(self, with_alarm = True):\n        \"\"\"\n        Run SPOT on the stream\n        \n        Parameters\n        ----------\n        with_alarm : bool\n\t\t    (default = True) If False, SPOT will adapt the threshold assuming \\\n            there is no abnormal values\n\n\n        Returns\n        ----------\n        dict\n            keys : 'thresholds' and 'alarms'\n            \n            'thresholds' contains the extreme quantiles and 'alarms' contains \\\n            the indexes of the values which have triggered alarms\n            \n        \"\"\"\n        if (self.n>self.init_data.size):\n            print('Warning : the algorithm seems to have already been run, you \\\n            should initialize before running again')\n            return {}\n        \n        # list of the thresholds\n        th = []\n        alarm = []\n        scores = []\n        # Loop over the stream\n        for i in tqdm.tqdm(range(self.data.size)):\n            scores.append(self.data[i])\n            # If the observed value exceeds the current threshold (alarm case)\n            if self.data[i]>self.extreme_quantile:\n                # if we want to alarm, we put it in the alarm list\n                if with_alarm:\n                    alarm.append(i)\n                # otherwise we add it in the peaks\n                else:\n                    self.peaks = np.append(self.peaks,self.data[i]-self.init_threshold)\n          
          self.Nt += 1\n                    self.n += 1\n                    # and we update the thresholds\n\n                    g,s,l = self._grimshaw()\n                    self.extreme_quantile = self._quantile(g,s)\n\n\n            # case where the value exceeds the initial threshold but not the alarm ones\n            elif self.data[i]>self.init_threshold:\n                    # we add it in the peaks\n                    self.peaks = np.append(self.peaks,self.data[i]-self.init_threshold)\n                    self.Nt += 1\n                    self.n += 1\n                    # and we update the thresholds\n\n                    g,s,l = self._grimshaw()\n                    self.extreme_quantile = self._quantile(g,s)\n            else:\n                self.n += 1\n\n                \n            th.append(self.extreme_quantile) # thresholds record\n        \n        return {'thresholds' : th, 'alarms': alarm, 'scores': scores}\n    \n\n    def plot(self,run_results,with_alarm = True):\n        \"\"\"\n        Plot the results of given by the run\n        \n        Parameters\n        ----------\n        run_results : dict\n            results given by the 'run' method\n        with_alarm : bool\n\t\t    (default = True) If True, alarms are plotted.\n\n\n        Returns\n        ----------\n        list\n            list of the plots\n            \n        \"\"\"\n        x = range(self.data.size)\n        K = run_results.keys()\n        \n        ts_fig, = plt.plot(x,self.data,color=air_force_blue)\n        fig = [ts_fig]\n        \n        if 'thresholds' in K:\n            th = run_results['thresholds']\n            th_fig, = plt.plot(x,th,color=deep_saffron,lw=2,ls='dashed')\n            fig.append(th_fig)\n        \n        if with_alarm and ('alarms' in K):\n            alarm = run_results['alarms']\n            al_fig = plt.scatter(alarm,self.data[alarm],color='red')\n            fig.append(al_fig)\n            \n        
plt.xlim((0,self.data.size))\n\n        \n        return fig\n\n\n\n\"\"\"\n================================= WITH DRIFT ==================================\n\"\"\"\n\ndef backMean(X,d):\n    M = []\n    w = X[:d].sum()\n    M.append(w/d)\n    for i in range(d,len(X)):\n        w = w - X[i-d] + X[i]\n        M.append(w/d)\n    return np.array(M)\n\n\n\nclass dSPOT:\n    \"\"\"\n    This class allows to run DSPOT algorithm on univariate dataset (upper-bound)\n    \n    Attributes\n    ----------\n    proba : float\n        Detection level (risk), chosen by the user\n        \n    depth : int\n        Number of observations to compute the moving average\n        \n    extreme_quantile : float\n        current threshold (bound between normal and abnormal events)\n        \n    data : numpy.array\n        stream\n    \n    init_data : numpy.array\n        initial batch of observations (for the calibration/initialization step)\n    \n    init_threshold : float\n        initial threshold computed during the calibration step\n    \n    peaks : numpy.array\n        array of peaks (excesses above the initial threshold)\n    \n    n : int\n        number of observed values\n    \n    Nt : int\n        number of observed peaks\n    \"\"\"\n    def __init__(self, q, depth):\n        self.proba = q\n        self.extreme_quantile = None\n        self.data = None\n        self.init_data = None\n        self.init_threshold = None\n        self.peaks = None\n        self.n = 0\n        self.Nt = 0\n        self.depth = depth\n        \n    def __str__(self):\n        s = ''\n        s += 'Streaming Peaks-Over-Threshold Object\\n'\n        s += 'Detection level q = %s\\n' % self.proba\n        if self.data is not None:\n            s += 'Data imported : Yes\\n'\n            s += '\\t initialization  : %s values\\n' % self.init_data.size\n            s += '\\t stream : %s values\\n' % self.data.size\n        else:\n            s += 'Data imported : No\\n'\n            return s\n       
     \n        if self.n == 0:\n            s += 'Algorithm initialized : No\\n'\n        else:\n            s += 'Algorithm initialized : Yes\\n'\n            s += '\\t initial threshold : %s\\n' % self.init_threshold\n            \n            r = self.n-self.init_data.size\n            if r > 0:\n                s += 'Algorithm run : Yes\\n'\n                s += '\\t number of observations : %s (%.2f %%)\\n' % (r,100*r/self.n)\n                s += '\\t triggered alarms : %s (%.2f %%)\\n' % (len(self.alarm),100*len(self.alarm)/self.n)\n            else:\n                s += '\\t number of peaks  : %s\\n' % self.Nt\n                s += '\\t extreme quantile : %s\\n' % self.extreme_quantile\n                s += 'Algorithm run : No\\n'\n        return s\n    \n    \n    def fit(self,init_data,data):\n        \"\"\"\n        Import data to DSPOT object\n        \n        Parameters\n\t    ----------\n\t    init_data : list, numpy.array or pandas.Series\n\t\t    initial batch to calibrate the algorithm\n            \n        data : numpy.array\n\t\t    data for the run (list, np.array or pd.series)\n\t\n        \"\"\"\n        if isinstance(data,list):\n            self.data = np.array(data)\n        elif isinstance(data,np.ndarray):\n            self.data = data\n        elif isinstance(data,pd.Series):\n            self.data = data.values\n        else:\n            print('This data format (%s) is not supported' % type(data))\n            return\n            \n        if isinstance(init_data,list):\n            self.init_data = np.array(init_data)\n        elif isinstance(init_data,np.ndarray):\n            self.init_data = init_data\n        elif isinstance(init_data,pd.Series):\n            self.init_data = init_data.values\n        elif isinstance(init_data,int):\n            self.init_data = self.data[:init_data]\n            self.data = self.data[init_data:]\n        elif isinstance(init_data,float) & (init_data<1) & (init_data>0):\n            r = 
int(init_data*data.size)\n            self.init_data = self.data[:r]\n            self.data = self.data[r:]\n        else:\n            print('The initial data cannot be set')\n            return\n        \n    def add(self,data):\n        \"\"\"\n        This function allows to append data to the already fitted data\n        \n        Parameters\n\t    ----------\n\t    data : list, numpy.array, pandas.Series\n\t\t    data to append\n        \"\"\"\n        if isinstance(data,list):\n            data = np.array(data)\n        elif isinstance(data,np.ndarray):\n            data = data\n        elif isinstance(data,pd.Series):\n            data = data.values\n        else:\n            print('This data format (%s) is not supported' % type(data))\n            return\n        \n        self.data = np.append(self.data,data)\n        return\n    \n    def initialize(self, verbose = True):\n        \"\"\"\n        Run the calibration (initialization) step\n        \n        Parameters\n\t    ----------\n\t    verbose : bool\n\t\t    (default = True) If True, gives details about the batch initialization\n        \"\"\"\n        n_init = self.init_data.size - self.depth\n        \n        M = backMean(self.init_data,self.depth)\n        T = self.init_data[self.depth:]-M[:-1] # new variable\n        \n        S = np.sort(T)     # we sort X to get the empirical quantile\n        self.init_threshold = S[int(0.98*n_init)] # t is fixed for the whole algorithm\n\n        # initial peaks\n        self.peaks = T[T>self.init_threshold]-self.init_threshold \n        self.Nt = self.peaks.size\n        self.n = n_init\n        \n        if verbose:\n            print('Initial threshold : %s' % self.init_threshold)\n            print('Number of peaks : %s' % self.Nt)\n            print('Grimshaw maximum log-likelihood estimation ... 
', end = '')\n            \n        g,s,l = self._grimshaw()\n        self.extreme_quantile = self._quantile(g,s)\n        \n        if verbose:\n            print('[done]')\n            print('\\t'+chr(0x03B3) + ' = ' + str(g))\n            print('\\t'+chr(0x03C3) + ' = ' + str(s))\n            print('\\tL = ' + str(l))\n            print('Extreme quantile (probability = %s): %s' % (self.proba,self.extreme_quantile))\n        \n        return\n    \n    \n    \n    \n    def _rootsFinder(fun,jac,bounds,npoints,method):\n        \"\"\"\n        Find possible roots of a scalar function\n        \n        Parameters\n        ----------\n        fun : function\n\t\t    scalar function \n        jac : function\n            first order derivative of the function  \n        bounds : tuple\n            (min,max) interval for the roots search    \n        npoints : int\n            maximum number of roots to output      \n        method : str\n            'regular' : regular sample of the search interval, 'random' : uniform (distribution) sample of the search interval\n        \n        Returns\n        ----------\n        numpy.array\n            possible roots of the function\n        \"\"\"\n        if method == 'regular':\n            step = (bounds[1]-bounds[0])/(npoints+1)\n            X0 = np.arange(bounds[0]+step,bounds[1],step)\n        elif method == 'random':\n            X0 = np.random.uniform(bounds[0],bounds[1],npoints)\n        \n        def objFun(X,f,jac):\n            g = 0\n            j = np.zeros(X.shape)\n            i = 0\n            for x in X:\n                fx = f(x)\n                g = g+fx**2\n                j[i] = 2*fx*jac(x)\n                i = i+1\n            return g,j\n        \n        opt = minimize(lambda X:objFun(X,fun,jac), X0, \n                       method='L-BFGS-B', \n                       jac=True, bounds=[bounds]*len(X0))\n        \n        X = opt.x\n        np.round(X,decimals = 5)\n        return np.unique(X)\n    \n  
  \n    def _log_likelihood(Y,gamma,sigma):\n        \"\"\"\n        Compute the log-likelihood for the Generalized Pareto Distribution (μ=0)\n        \n        Parameters\n        ----------\n        Y : numpy.array\n\t\t    observations\n        gamma : float\n            GPD index parameter\n        sigma : float\n            GPD scale parameter (>0)   \n\n        Returns\n        ----------\n        float\n            log-likelihood of the sample Y to be drawn from a GPD(γ,σ,μ=0)\n        \"\"\"\n        n = Y.size\n        if gamma != 0:\n            tau = gamma/sigma\n            L = -n * log(sigma) - ( 1 + (1/gamma) ) * ( np.log(1+tau*Y) ).sum()\n        else:\n            L = n * ( 1 + log(Y.mean()) )\n        return L\n\n\n    def _grimshaw(self,epsilon = 1e-8, n_points = 10):\n        \"\"\"\n        Compute the GPD parameters estimation with the Grimshaw's trick\n        \n        Parameters\n        ----------\n        epsilon : float\n\t\t    numerical parameter to perform (default : 1e-8)\n        n_points : int\n            maximum number of candidates for maximum likelihood (default : 10)\n\n        Returns\n        ----------\n        gamma_best,sigma_best,ll_best\n            gamma estimates, sigma estimates and corresponding log-likelihood\n        \"\"\"\n        def u(s):\n            return 1 + np.log(s).mean()\n            \n        def v(s):\n            return np.mean(1/s)\n        \n        def w(Y,t):\n            s = 1+t*Y\n            us = u(s)\n            vs = v(s)\n            return us*vs-1\n        \n        def jac_w(Y,t):\n            s = 1+t*Y\n            us = u(s)\n            vs = v(s)\n            jac_us = (1/t)*(1-vs)\n            jac_vs = (1/t)*(-vs+np.mean(1/s**2))\n            return us*jac_vs+vs*jac_us\n            \n    \n        Ym = self.peaks.min()\n        YM = self.peaks.max()\n        Ymean = self.peaks.mean()\n        \n        \n        a = -1/YM\n        if abs(a)<2*epsilon:\n            epsilon = 
abs(a)/n_points\n        \n        a = a + epsilon\n        b = 2*(Ymean-Ym)/(Ymean*Ym)\n        c = 2*(Ymean-Ym)/(Ym**2)\n    \n        # We look for possible roots\n        left_zeros = SPOT._rootsFinder(lambda t: w(self.peaks,t),\n                                 lambda t: jac_w(self.peaks,t),\n                                 (a+epsilon,-epsilon),\n                                 n_points,'regular')\n        \n        right_zeros = SPOT._rootsFinder(lambda t: w(self.peaks,t),\n                                  lambda t: jac_w(self.peaks,t),\n                                  (b,c),\n                                  n_points,'regular')\n    \n        # all the possible roots\n        zeros = np.concatenate((left_zeros,right_zeros))\n        \n        # 0 is always a solution so we initialize with it\n        gamma_best = 0\n        sigma_best = Ymean\n        ll_best = SPOT._log_likelihood(self.peaks,gamma_best,sigma_best)\n        \n        # we look for better candidates\n        for z in zeros:\n            gamma = u(1+z*self.peaks)-1\n            sigma = gamma/z\n            ll = dSPOT._log_likelihood(self.peaks,gamma,sigma)\n            if ll>ll_best:\n                gamma_best = gamma\n                sigma_best = sigma\n                ll_best = ll\n    \n        return gamma_best,sigma_best,ll_best\n\n    \n\n    def _quantile(self,gamma,sigma):\n        \"\"\"\n        Compute the quantile at level 1-q\n        \n        Parameters\n        ----------\n        gamma : float\n\t\t    GPD parameter\n        sigma : float\n            GPD parameter\n\n        Returns\n        ----------\n        float\n            quantile at level 1-q for the GPD(γ,σ,μ=0)\n        \"\"\"\n        r = self.n * self.proba / self.Nt\n        if gamma != 0:\n            return self.init_threshold + (sigma/gamma)*(pow(r,-gamma)-1)\n        else:\n            return self.init_threshold - sigma*log(r)\n\n        \n    def run(self, with_alarm = True):\n        \"\"\"\n        
Run biSPOT on the stream\n        \n        Parameters\n        ----------\n        with_alarm : bool\n\t\t    (default = True) If False, SPOT will adapt the threshold assuming \\\n            there is no abnormal values\n\n\n        Returns\n        ----------\n        dict\n            keys : 'upper_thresholds', 'lower_thresholds' and 'alarms'\n            \n            '***-thresholds' contains the extreme quantiles and 'alarms' contains \\\n            the indexes of the values which have triggered alarms\n            \n        \"\"\"\n        if (self.n>self.init_data.size):\n            print('Warning : the algorithm seems to have already been run, you \\\n            should initialize before running again')\n            return {}\n        \n        # actual normal window\n        W = self.init_data[-self.depth:]\n        \n        # list of the thresholds\n        th = []\n        alarm = []\n        scores = []\n        # Loop over the stream\n        for i in tqdm.tqdm(range(self.data.size)):\n            Mi = W.mean()\n            scores.append((self.data[i]-Mi))\n            # If the observed value exceeds the current threshold (alarm case)\n            if (self.data[i]-Mi)>self.extreme_quantile:\n                # if we want to alarm, we put it in the alarm list\n                if with_alarm:\n                    alarm.append(i)\n                # otherwise we add it in the peaks\n                else:\n                    self.peaks = np.append(self.peaks,self.data[i]-Mi-self.init_threshold)\n                    self.Nt += 1\n                    self.n += 1\n                    # and we update the thresholds\n\n                    g,s,l = self._grimshaw()\n                    self.extreme_quantile = self._quantile(g,s) #+ Mi\n                    W = np.append(W[1:],self.data[i])\n\n            # case where the value exceeds the initial threshold but not the alarm ones\n            elif (self.data[i]-Mi)>self.init_threshold:\n                    # we 
add it in the peaks\n                    self.peaks = np.append(self.peaks,self.data[i]-Mi-self.init_threshold)\n                    self.Nt += 1\n                    self.n += 1\n                    # and we update the thresholds\n\n                    g,s,l = self._grimshaw()\n                    self.extreme_quantile = self._quantile(g,s) #+ Mi\n                    W = np.append(W[1:],self.data[i])\n            else:\n                self.n += 1\n                W = np.append(W[1:],self.data[i])\n\n                \n            th.append(self.extreme_quantile+Mi) # thresholds record\n        \n        return {'thresholds' : th, 'alarms': alarm, 'scores': scores}\n    \n\n    def plot(self,run_results, with_alarm = True):\n        \"\"\"\n        Plot the results given by the run\n        \n        Parameters\n        ----------\n        run_results : dict\n            results given by the 'run' method\n        with_alarm : bool\n\t\t    (default = True) If True, alarms are plotted.\n\n\n        Returns\n        ----------\n        list\n            list of the plots\n            \n        \"\"\"\n        x = range(self.data.size)\n        K = run_results.keys()\n        \n        ts_fig, = plt.plot(x,self.data,color=air_force_blue)\n        fig = [ts_fig]\n        \n#        if 'upper_thresholds' in K:\n#            thup = run_results['upper_thresholds']\n#            uth_fig, = plt.plot(x,thup,color=deep_saffron,lw=2,ls='dashed')\n#            fig.append(uth_fig)\n#            \n#        if 'lower_thresholds' in K:\n#            thdown = run_results['lower_thresholds']\n#            lth_fig, = plt.plot(x,thdown,color=deep_saffron,lw=2,ls='dashed')\n#            fig.append(lth_fig)\n        \n        if 'thresholds' in K:\n            th = run_results['thresholds']\n            th_fig, = plt.plot(x,th,color=deep_saffron,lw=2,ls='dashed')\n            fig.append(th_fig)\n        \n        if with_alarm and ('alarms' in K):\n            alarm = 
run_results['alarms']\n            if len(alarm)>0:\n                plt.scatter(alarm,self.data[alarm],color='red')\n            \n        plt.xlim((0,self.data.size))\n\n        \n        return fig"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/tasks/__init__.py",
    "content": "from .anomaly_detection import eval_anomaly_detection, eval_anomaly_detection_coldstart\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/tasks/anomaly_detection.py",
    "content": "import numpy as np\nimport time\nimport bottleneck as bn\nfrom sklearn.metrics import f1_score, precision_score, recall_score\nfrom other_anomaly_baselines.metrics.affiliation.metrics import pr_from_events\nfrom other_anomaly_baselines.metrics.vus.metrics import get_range_vus_roc\nfrom other_anomaly_baselines.metrics.affiliation.generics import convert_vector_to_events\n\nfrom sklearn.metrics import f1_score, precision_score, recall_score\nimport bottleneck as bn\nimport pdb\n\nfrom tadpak import evaluate\n\n\n# consider delay threshold and missing segments\ndef get_range_proba(predict, label, delay=7):\n    splits = np.where(label[1:] != label[:-1])[0] + 1\n    is_anomaly = label[0] == 1\n    new_predict = np.array(predict)\n    pos = 0\n\n    for sp in splits:\n        if is_anomaly:\n            if 1 in predict[pos:min(pos + delay + 1, sp)]:\n                new_predict[pos: sp] = 1\n            else:\n                new_predict[pos: sp] = 0\n        is_anomaly = not is_anomaly\n        pos = sp\n    sp = len(label)\n\n    if is_anomaly:  # anomaly in the end\n        if 1 in predict[pos: min(pos + delay + 1, sp)]:\n            new_predict[pos: sp] = 1\n        else:\n            new_predict[pos: sp] = 0\n\n    return new_predict\n\n\n# consider delay threshold and missing segments\ndef get_range_proba(predict, label, delay=7):\n    splits = np.where(label[1:] != label[:-1])[0] + 1\n    is_anomaly = label[0] == 1\n    new_predict = np.array(predict)\n    pos = 0\n\n    for sp in splits:\n        if is_anomaly:\n            if 1 in predict[pos:min(pos + delay + 1, sp)]:\n                new_predict[pos: sp] = 1\n            else:\n                new_predict[pos: sp] = 0\n        is_anomaly = not is_anomaly\n        pos = sp\n    sp = len(label)\n\n    if is_anomaly:  # anomaly in the end\n        if 1 in predict[pos: min(pos + delay + 1, sp)]:\n            new_predict[pos: sp] = 1\n        else:\n            new_predict[pos: sp] = 0\n\n    
return new_predict\n\n\n# set missing = 0\ndef reconstruct_label(timestamp, label):\n    timestamp = np.asarray(timestamp, np.int64)\n    index = np.argsort(timestamp)\n\n    timestamp_sorted = np.asarray(timestamp[index])\n    interval = np.min(np.diff(timestamp_sorted))\n\n    label = np.asarray(label, np.int64)\n    label = np.asarray(label[index])\n\n    idx = (timestamp_sorted - timestamp_sorted[0]) // interval\n\n    new_label = np.zeros(shape=((timestamp_sorted[-1] - timestamp_sorted[0]) // interval + 1,), dtype=np.int64)\n    new_label[idx] = label\n\n    return new_label\n\n\ndef eval_ad_result(test_pred_list, test_labels_list, test_timestamps_list, delay, pred_scores=None):\n    labels = []\n    pred = []\n    ts_scores = []\n    if pred_scores is not None:\n        for test_pred, test_labels, test_timestamps, test_score in zip(test_pred_list, test_labels_list, test_timestamps_list, pred_scores):\n            # assert test_pred.shape == test_labels.shape == test_timestamps.shape\n            min_len = min(min(test_pred.shape[0], test_labels.shape[0]), test_timestamps.shape[0])\n            test_pred = test_pred[:min_len]\n            test_labels = test_labels[:min_len]\n            test_timestamps = test_timestamps[:min_len]\n            test_score = test_score[:min_len]\n            min_len = min(min(test_pred.shape[0], test_labels.shape[0]), test_timestamps.shape[0])\n            test_pred = test_pred[:min_len]\n            test_labels = test_labels[:min_len]\n            test_timestamps = test_timestamps[:min_len]\n            test_labels = reconstruct_label(test_timestamps, test_labels)\n            test_pred = reconstruct_label(test_timestamps, test_pred)\n            test_pred = get_range_proba(test_pred, test_labels, delay)\n            labels.append(test_labels)\n            pred.append(test_pred)\n            ts_scores.append(test_score)\n    else:\n        for test_pred, test_labels, test_timestamps in zip(test_pred_list, test_labels_list, 
test_timestamps_list):\n            # assert test_pred.shape == test_labels.shape == test_timestamps.shape\n            test_labels = reconstruct_label(test_timestamps, test_labels)\n            test_pred = reconstruct_label(test_timestamps, test_pred)\n            test_pred = get_range_proba(test_pred, test_labels, delay)\n            labels.append(test_labels)\n            pred.append(test_pred)\n    labels = np.concatenate(labels)\n    pred = np.concatenate(pred)\n    if pred_scores is not None:\n        ts_scores = np.concatenate(ts_scores)\n\n    events_pred = convert_vector_to_events(pred)\n    events_gt = convert_vector_to_events(labels)\n\n    Trange = (0, len(labels))\n    affiliation = pr_from_events(events_pred, events_gt, Trange)\n    vus_results = get_range_vus_roc(labels, pred, 100)  # default slidingWindow = 100\n\n    eval_res = {\n        'f1': f1_score(labels, pred),\n        'precision': precision_score(labels, pred),\n        'recall': recall_score(labels, pred),\n        \"Affiliation precision\": affiliation['precision'],\n        \"Affiliation recall\": affiliation['recall'],\n        \"R_AUC_ROC\": vus_results[\"R_AUC_ROC\"],\n        \"R_AUC_PR\": vus_results[\"R_AUC_PR\"],\n        \"VUS_ROC\": vus_results[\"VUS_ROC\"],\n        \"VUS_PR\": vus_results[\"VUS_PR\"]\n    }\n    if pred_scores is not None:\n        # pred_scores = np.asarray(res_log_socres, np.float64)[0]\n        # labels = np.asarray(labels_log, np.int64)[0]\n        min_len1 = min(ts_scores.shape[0], labels.shape[0])\n        results_f1_pa_k_10 = evaluate.evaluate(ts_scores[:min_len1], labels[:min_len1], k=10)\n        # results_f1_pa_k_30 = evaluate.evaluate(pred, labels, k=30)\n        results_f1_pa_k_50 = evaluate.evaluate(ts_scores[:min_len1], labels[:min_len1], k=50)\n        results_f1_pa_k_90 = evaluate.evaluate(ts_scores[:min_len1], labels[:min_len1], k=90)\n\n        eval_res['f1_pa_10'] = results_f1_pa_k_10['best_f1_w_pa']\n        eval_res['f1_pa_50'] = 
results_f1_pa_k_50['best_f1_w_pa']\n        eval_res['f1_pa_90'] = results_f1_pa_k_90['best_f1_w_pa']\n\n    return eval_res\n\n\n\n\ndef np_shift(arr, num, fill_value=np.nan):\n    result = np.empty_like(arr)\n    if num > 0:\n        result[:num] = fill_value\n        result[num:] = arr[:-num]\n    elif num < 0:\n        result[num:] = fill_value\n        result[:num] = arr[-num:]\n    else:\n        result[:] = arr\n    return result\n\n\ndef adjustment(gt, pred):\n    anomaly_state = False\n    for i in range(len(gt)):\n        if gt[i] == 1 and pred[i] == 1 and not anomaly_state:\n            anomaly_state = True\n            for j in range(i, 0, -1):\n                if gt[j] == 0:\n                    break\n                else:\n                    if pred[j] == 0:\n                        pred[j] = 1\n            for j in range(i, len(gt)):\n                if gt[j] == 0:\n                    break\n                else:\n                    if pred[j] == 0:\n                        pred[j] = 1\n        elif gt[i] == 0:\n            anomaly_state = False\n        if anomaly_state:\n            pred[i] = 1\n    return gt, pred\n\n\ndef eval_anomaly_detection(model, all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay, is_multi=False, ucr_index=None):\n    t = time.time()\n    \n    all_train_repr = {}\n    all_test_repr = {}\n    all_train_repr_wom = {}\n    all_test_repr_wom = {}\n\n    if is_multi:\n        train_data = all_train_data\n        test_data = all_test_data\n        if test_data.shape[-1] > 2:\n            re_t = test_data.shape[-1]\n        else:\n            re_t = 1\n        full_repr = model.encode(\n            np.concatenate([train_data, test_data]).reshape(1, -1, re_t),\n            mask='mask_last',\n            casual=True,\n            sliding_length=1,\n            sliding_padding=200,\n            batch_size=256\n        ).squeeze()\n        all_train_repr[0] = 
full_repr[:len(train_data)]  # (n_timestamps, repr-dims)\n        all_test_repr[0] = full_repr[len(train_data):]  # (n_timestamps, repr-dims)\n\n        full_repr_wom = model.encode(\n            np.concatenate([train_data, test_data]).reshape(1, -1, re_t),\n            casual=True,\n            sliding_length=1,\n            sliding_padding=200,\n            batch_size=256\n        ).squeeze()\n        all_train_repr_wom[0] = full_repr_wom[:len(train_data)]  # (n_timestamps, repr-dims)\n        all_test_repr_wom[0] = full_repr_wom[len(train_data):]  # (n_timestamps, repr-dims)\n    else:\n        for k in all_train_data:\n            train_data = all_train_data[k]\n            test_data = all_test_data[k]\n\n            full_repr = model.encode(\n                np.concatenate([train_data, test_data]).reshape(1, -1, 1),\n                mask='mask_last',\n                casual=True,\n                sliding_length=1,\n                sliding_padding=200,\n                batch_size=256\n            ).squeeze()\n            all_train_repr[k] = full_repr[:len(train_data)] # (n_timestamps, repr-dims)\n            all_test_repr[k] = full_repr[len(train_data):] # (n_timestamps, repr-dims)\n\n            full_repr_wom = model.encode(\n                np.concatenate([train_data, test_data]).reshape(1, -1, 1),\n                casual=True,\n                sliding_length=1,\n                sliding_padding=200,\n                batch_size=256\n            ).squeeze()\n            all_train_repr_wom[k] = full_repr_wom[:len(train_data)] # (n_timestamps, repr-dims)\n            all_test_repr_wom[k] = full_repr_wom[len(train_data):] # (n_timestamps, repr-dims)\n\n            # print(np.shape(all_train_repr[k]))\n            # print(np.shape(all_test_repr[k]))\n            # print(np.shape(all_train_repr_wom[k]))\n            # print(np.shape(all_test_repr_wom[k]))\n            # print(\"#####################\")\n            # raise Exception('my personal exception!')\n       
 \n    res_log = []\n    res_log_socres = []\n    labels_log = []\n    timestamps_log = []\n    if is_multi:\n\n        test_labels = all_test_labels\n        test_timestamps = all_test_timestamps\n\n        train_err = np.abs(all_train_repr_wom[0] - all_train_repr[0]).sum(axis=1)\n        test_err = np.abs(all_test_repr_wom[0] - all_test_repr[0]).sum(axis=1)\n\n        ma = np_shift(bn.move_mean(np.concatenate([train_err, test_err]), 21), 1)\n        train_err_adj = (train_err - ma[:len(train_err)]) / ma[:len(train_err)]\n        test_err_adj = (test_err - ma[len(train_err):]) / ma[len(train_err):]\n        train_err_adj = train_err_adj[22:]\n\n        thr = np.mean(train_err_adj) + 4 * np.std(train_err_adj)\n        test_res = (test_err_adj > thr) * 1\n        res_log_socres.append(test_err_adj)\n\n        for i in range(len(test_res)):\n            if i >= delay and test_res[i - delay:i].sum() >= 1:\n                test_res[i] = 0\n\n        res_log.append(test_res)\n        labels_log.append(test_labels)\n        timestamps_log.append(test_timestamps)\n    else:\n        for k in all_train_data:\n            test_labels = all_test_labels[k]\n            test_timestamps = all_test_timestamps[k]\n\n            train_err = np.abs(all_train_repr_wom[k] - all_train_repr[k]).sum(axis=1)\n            test_err = np.abs(all_test_repr_wom[k] - all_test_repr[k]).sum(axis=1)\n\n            ma = np_shift(bn.move_mean(np.concatenate([train_err, test_err]), 21), 1)\n            train_err_adj = (train_err - ma[:len(train_err)]) / ma[:len(train_err)]\n            test_err_adj = (test_err - ma[len(train_err):]) / ma[len(train_err):]\n            train_err_adj = train_err_adj[22:]\n\n            thr = np.mean(train_err_adj) + 4 * np.std(train_err_adj)\n            test_res = (test_err_adj > thr) * 1\n            res_log_socres.append(test_err_adj)\n\n            for i in range(len(test_res)):\n                if i >= delay and test_res[i-delay:i].sum() >= 1:\n                    
test_res[i] = 0\n\n            res_log.append(test_res)\n            labels_log.append(test_labels)\n            timestamps_log.append(test_timestamps)\n    t = time.time() - t\n\n\n\n    if is_multi:\n        labels = np.asarray(labels_log, np.int64)[0]\n        pred = np.asarray(res_log, np.int64)[0]\n        # print(\"labels.shape = \", labels.shape, labels[:5])\n        # print(\"pred.shape = \", pred.shape, pred[:5])\n\n\n\n        events_pred = convert_vector_to_events(pred)\n        events_gt = convert_vector_to_events(labels)\n\n        Trange = (0, len(labels))\n\n        # print(\"labels.shape = \", labels.shape, \"pred.shape = \", pred.shape)\n        # print(\"events_pred.shape = \", len(events_pred), \", events_gt.shape = \", len(events_gt), \", Trange = \", Trange)\n        if ucr_index == 79 or ucr_index == 108 or ucr_index == 187 or ucr_index == 203:\n            pred_scores = np.asarray(res_log_socres, np.float64)[0]\n\n            # results_f1_pa_k_10 = evaluate.evaluate(pred_scores, labels, k=10)\n            # # results_f1_pa_k_30 = evaluate.evaluate(pred, labels, k=30)\n            # results_f1_pa_k_50 = evaluate.evaluate(pred_scores, labels, k=50)\n            # results_f1_pa_k_90 = evaluate.evaluate(pred_scores, labels, k=90)\n\n            labels, pred = adjustment(labels, pred)\n\n            eval_res = {\n                'f1': f1_score(labels, pred),\n                'precision': precision_score(labels, pred),\n                'recall': recall_score(labels, pred),\n                \"Affiliation precision\": None,\n                \"Affiliation recall\": None,\n                \"R_AUC_ROC\": None,\n                \"R_AUC_PR\": None,\n                \"VUS_ROC\": None,\n                \"VUS_PR\": None,\n                'f1_pa_10': None,\n                # 'results_f1_pa_k_10_th_w_pa': results_f1_pa_k_10['pa_f1_scores'],\n                'f1_pa_50': None,\n                # 'results_f1_pa_k_50_th_w_pa': results_f1_pa_k_50['pa_f1_scores'],\n 
               'f1_pa_90': None,\n                # 'results_f1_pa_k_90_th_w_pa': results_f1_pa_k_90['pa_f1_scores'],\n\n                # 'results_f1_pa_k_10_wpa': f1_score(labels, results_f1_pa_k_10),\n                # # 'results_f1_pa_k_10_th_w_pa': results_f1_pa_k_10['best_f1_th_w_pa'],\n                # 'results_f1_pa_k_50_wpa': f1_score(labels, results_f1_pa_k_50),\n                # # 'results_f1_pa_k_50_th_w_pa': results_f1_pa_k_50['best_f1_th_w_pa'],\n                # 'results_f1_pa_k_90_wpa': f1_score(labels, results_f1_pa_k_90),\n                # 'results_f1_pa_k_90_th_w_pa': results_f1_pa_k_90['best_f1_th_w_pa'],\n            }\n        else:\n\n            affiliation = pr_from_events(events_pred, events_gt, Trange)\n            vus_results = get_range_vus_roc(labels, pred, 100)  # default slidingWindow = 100\n\n            pred_scores = np.asarray(res_log_socres, np.float64)[0]\n\n            # print(\"pred_scores.shape = \", pred_scores.shape, labels.shape)\n            # print(\"pred_scores.shape = \", pred_scores[:10])\n            # print(\"labels.shape = \", labels[:10])\n\n            results_f1_pa_k_10 = evaluate.evaluate(pred_scores, labels, k=10)\n            # results_f1_pa_k_30 = evaluate.evaluate(pred, labels, k=30)\n            results_f1_pa_k_50 = evaluate.evaluate(pred_scores, labels, k=50)\n            results_f1_pa_k_90 = evaluate.evaluate(pred_scores, labels, k=90)\n\n            labels, pred = adjustment(labels, pred)\n\n            eval_res = {\n            'f1': f1_score(labels, pred),\n            'precision': precision_score(labels, pred),\n            'recall': recall_score(labels, pred),\n                \"Affiliation precision\": affiliation['precision'],\n                \"Affiliation recall\": affiliation['recall'],\n                \"R_AUC_ROC\": vus_results[\"R_AUC_ROC\"],\n                \"R_AUC_PR\": vus_results[\"R_AUC_PR\"],\n                \"VUS_ROC\": vus_results[\"VUS_ROC\"],\n                \"VUS_PR\": 
vus_results[\"VUS_PR\"],\n                'f1_pa_10': results_f1_pa_k_10['best_f1_w_pa'],\n                # 'results_f1_pa_k_10_th_w_pa': results_f1_pa_k_10['pa_f1_scores'],\n                'f1_pa_50': results_f1_pa_k_50['best_f1_w_pa'],\n                # 'results_f1_pa_k_50_th_w_pa': results_f1_pa_k_50['pa_f1_scores'],\n                'f1_pa_90': results_f1_pa_k_90['best_f1_w_pa'],\n                # 'results_f1_pa_k_90_th_w_pa': results_f1_pa_k_90['pa_f1_scores'],\n\n                # 'results_f1_pa_k_10_wpa': f1_score(labels, results_f1_pa_k_10),\n                # # 'results_f1_pa_k_10_th_w_pa': results_f1_pa_k_10['best_f1_th_w_pa'],\n                # 'results_f1_pa_k_50_wpa': f1_score(labels, results_f1_pa_k_50),\n                # # 'results_f1_pa_k_50_th_w_pa': results_f1_pa_k_50['best_f1_th_w_pa'],\n                # 'results_f1_pa_k_90_wpa': f1_score(labels, results_f1_pa_k_90),\n                # 'results_f1_pa_k_90_th_w_pa': results_f1_pa_k_90['best_f1_th_w_pa'],\n            }\n    else:\n        # pred_scores = np.asarray(res_log_socres, np.float64)\n        # print(\"pred_scores.shape = \", pred_scores.shape)\n        # results_f1_pa_k_10 = evaluate.evaluate(pred_scores, labels, k=10)\n        # # results_f1_pa_k_30 = evaluate.evaluate(pred, labels, k=30)\n        # results_f1_pa_k_50 = evaluate.evaluate(pred_scores, labels, k=50)\n        # results_f1_pa_k_90 = evaluate.evaluate(pred_scores, labels, k=90)\n\n        eval_res = eval_ad_result(res_log, labels_log, timestamps_log, delay, pred_scores=res_log_socres)\n\n\n\n    eval_res['infer_time'] = t\n    return res_log, eval_res\n\n\ndef eval_anomaly_detection_coldstart(model, all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay):\n    t = time.time()\n    \n    all_data = {}\n    all_repr = {}\n    all_repr_wom = {}\n    for k in all_train_data:\n        all_data[k] = np.concatenate([all_train_data[k], all_test_data[k]])\n        
all_repr[k] = model.encode(\n            all_data[k].reshape(1, -1, 1),\n            mask='mask_last',\n            casual=True,\n            sliding_length=1,\n            sliding_padding=200,\n            batch_size=256\n        ).squeeze()\n        all_repr_wom[k] = model.encode(\n            all_data[k].reshape(1, -1, 1),\n            casual=True,\n            sliding_length=1,\n            sliding_padding=200,\n            batch_size=256\n        ).squeeze()\n        \n    res_log = []\n    labels_log = []\n    timestamps_log = []\n    for k in all_data:\n        data = all_data[k]\n        labels = np.concatenate([all_train_labels[k], all_test_labels[k]])\n        timestamps = np.concatenate([all_train_timestamps[k], all_test_timestamps[k]])\n        \n        err = np.abs(all_repr_wom[k] - all_repr[k]).sum(axis=1)\n        ma = np_shift(bn.move_mean(err, 21), 1)\n        err_adj = (err - ma) / ma\n        \n        MIN_WINDOW = len(data) // 10\n        thr = bn.move_mean(err_adj, len(err_adj), MIN_WINDOW) + 4 * bn.move_std(err_adj, len(err_adj), MIN_WINDOW)\n        res = (err_adj > thr) * 1\n        \n        for i in range(len(res)):\n            if i >= delay and res[i-delay:i].sum() >= 1:\n                res[i] = 0\n\n        res_log.append(res[MIN_WINDOW:])\n        labels_log.append(labels[MIN_WINDOW:])\n        timestamps_log.append(timestamps[MIN_WINDOW:])\n    t = time.time() - t\n    \n    eval_res = eval_ad_result(res_log, labels_log, timestamps_log, delay)\n    eval_res['infer_time'] = t\n    return res_log, eval_res\n\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/train.py",
    "content": "import torch\nimport numpy as np\nimport argparse\nimport os\nimport sys\nimport time\nimport datetime\nfrom ts2vec import TS2Vec\nimport tasks\nimport datautils\nfrom utils import init_dl_program, name_with_datetime, pkl_save, data_dropout\n\ndef save_checkpoint_callback(\n    save_every=1,\n    unit='epoch'\n):\n    assert unit in ('epoch', 'iter')\n    def callback(model, loss):\n        n = model.n_epochs if unit == 'epoch' else model.n_iters\n        if n % save_every == 0:\n            model.save(f'{run_dir}/model_{n}.pkl')\n    return callback\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('dataset', help='The dataset name')\n    parser.add_argument('run_name', help='The folder name used to save model, output and evaluation metrics. This can be set to any word')\n    parser.add_argument('--loader', type=str, required=True, help='The data loader used to load the experimental data. This can be set to anomaly or anomaly_coldstart')\n    parser.add_argument('--gpu', type=int, default=0, help='The gpu no. 
used for training and inference (defaults to 0)')\n    parser.add_argument('--batch-size', type=int, default=8, help='The batch size (defaults to 8)')\n    parser.add_argument('--lr', type=float, default=0.001, help='The learning rate (defaults to 0.001)')\n    parser.add_argument('--repr-dims', type=int, default=320, help='The representation dimension (defaults to 320)')\n    parser.add_argument('--max-train-length', type=int, default=3000, help='For sequence with a length greater than <max_train_length>, it would be cropped into some sequences, each of which has a length less than <max_train_length> (defaults to 3000)')\n    parser.add_argument('--iters', type=int, default=None, help='The number of iterations')\n    parser.add_argument('--epochs', type=int, default=None, help='The number of epochs')\n    parser.add_argument('--save-every', type=int, default=None, help='Save the checkpoint every <save_every> iterations/epochs')\n    parser.add_argument('--seed', type=int, default=None, help='The random seed')\n    parser.add_argument('--max-threads', type=int, default=None, help='The maximum allowed number of threads used by this process')\n    parser.add_argument('--eval', action=\"store_true\", help='Whether to perform evaluation after training')\n    parser.add_argument('--irregular', type=float, default=0, help='The ratio of missing observations (defaults to 0)')\n    args = parser.parse_args()\n    \n    print(\"Dataset:\", args.dataset)\n    print(\"Arguments:\", str(args))\n    \n    device = init_dl_program(args.gpu, seed=args.seed, max_threads=args.max_threads)\n    \n    print('Loading data... 
', end='')\n    if args.loader == 'anomaly':\n        task_type = 'anomaly_detection'\n        all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay = datautils.load_anomaly(args.dataset)\n        train_data = datautils.gen_ano_train_data(all_train_data)\n        \n    elif args.loader == 'anomaly_coldstart':\n        task_type = 'anomaly_detection_coldstart'\n        all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay = datautils.load_anomaly(args.dataset)\n        train_data, _, _, _ = datautils.load_UCR('FordA')\n        \n    else:\n        raise ValueError(f\"Unknown loader {args.loader}.\")\n        \n        \n    if args.irregular > 0:\n        raise ValueError(f\"Task type {task_type} is not supported when irregular>0.\")\n    print('done')\n    \n    config = dict(\n        batch_size=args.batch_size,\n        lr=args.lr,\n        output_dims=args.repr_dims,\n        max_train_length=args.max_train_length\n    )\n    \n    if args.save_every is not None:\n        unit = 'epoch' if args.epochs is not None else 'iter'\n        config[f'after_{unit}_callback'] = save_checkpoint_callback(args.save_every, unit)\n\n    run_dir = 'training/' + args.dataset + '__' + name_with_datetime(args.run_name)\n    os.makedirs(run_dir, exist_ok=True)\n    \n    t = time.time()\n    \n    model = TS2Vec(\n        input_dims=train_data.shape[-1],\n        device=device,\n        **config\n    )\n    loss_log = model.fit(\n        train_data,\n        n_epochs=args.epochs,\n        n_iters=args.iters,\n        verbose=True\n    )\n    model.save(f'{run_dir}/model.pkl')\n\n    t = time.time() - t\n    print(f\"\\nTraining time: {datetime.timedelta(seconds=t)}\")\n    print(\"Training time(seconds): \", t)\n\n    if args.eval:\n        if task_type == 'anomaly_detection':\n            out, eval_res = tasks.eval_anomaly_detection(model, all_train_data, 
all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay)\n        elif task_type == 'anomaly_detection_coldstart':\n            out, eval_res = tasks.eval_anomaly_detection_coldstart(model, all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay)\n        else:\n            assert False\n        pkl_save(f'{run_dir}/out.pkl', out)\n        pkl_save(f'{run_dir}/eval_res.pkl', eval_res)\n        print('Evaluation result:', eval_res)\n\n    print(\"Finished.\")\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/trainATbatch.py",
    "content": "import logging\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader, TensorDataset, SequentialSampler\nfrom utils import data_slice\nimport datautils\nimport pdb\nfrom transformers.optimization import AdamW, get_cosine_schedule_with_warmup\nfrom sklearn.metrics import f1_score\nimport tasks\nfrom ATmodelbatch import AnomalyTransformer\nimport time\nimport bottleneck as bn\nimport argparse\nimport os\nimport pickle\n\nfrom sklearn.metrics import f1_score, precision_score, recall_score\nimport bottleneck as bn\nimport pdb\n\n# consider delay threshold and missing segments\ndef get_range_proba(predict, label, delay=7):\n    splits = np.where(label[1:] != label[:-1])[0] + 1\n    is_anomaly = label[0] == 1\n    new_predict = np.array(predict)\n    pos = 0\n\n    for sp in splits:\n        if is_anomaly:\n            if 1 in predict[pos:min(pos + delay + 1, sp)]:\n                new_predict[pos: sp] = 1\n            else:\n                new_predict[pos: sp] = 0\n        is_anomaly = not is_anomaly\n        pos = sp\n    sp = len(label)\n\n    if is_anomaly:  # anomaly in the end\n        if 1 in predict[pos: min(pos + delay + 1, sp)]:\n            new_predict[pos: sp] = 1\n        else:\n            new_predict[pos: sp] = 0\n\n    return new_predict\n\nif torch.cuda.is_available():\n    torch.set_default_tensor_type('torch.cuda.DoubleTensor')\nelse:\n    torch.set_default_tensor_type('torch.DoubleTensor')\n\nlogger = logging.getLogger(__name__)\n\n\nclass Config:\n    window_size = 100\n    shuffle = True\n    epochs = 3\n    warmup_ratio = 0.1\n    lr = 10e-4\n    adam_epsilon = 1e-6\n    batch_size = 32\n\n    in_channel = 1\n    dataset_name = \"kpi\"\n    d_model = 512\n    layers = 3\n    lambda_ = 3\n\n    save_dir = './save_models'\n    save_every_epoch = 2\n\n    is_train = False\n    is_eval = True\n\n\ndef train(config, model, all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels,\n     
     all_test_timestamps, delay):\n    # train_data = datautils.gen_ano_train_data(all_train_data)\n\n    train_data = all_train_data\n    config.in_channel = train_data.shape[-1]\n    train_data = data_slice(train_data, config.window_size)\n    train_data = torch.from_numpy(train_data)\n\n    if torch.cuda.is_available():\n        train_data = train_data.cuda()\n\n    train_dataset = TensorDataset(train_data)\n    train_dataloader = DataLoader(train_dataset, batch_size=min(config.batch_size, len(train_dataset)),\n                                  shuffle=config.shuffle, drop_last=True, generator=torch.Generator(device='cuda:0'))\n\n    total_steps = int(len(train_dataloader) * config.epochs)\n    warmup_steps = max(int(total_steps * config.warmup_ratio), 200)\n    optimizer = AdamW(\n        model.parameters(),\n        lr=config.lr,\n        eps=config.adam_epsilon,\n    )\n    scheduler = get_cosine_schedule_with_warmup(\n        optimizer, num_warmup_steps=warmup_steps, num_training_steps=total_steps\n    )\n    print(\"Total steps: {}\".format(total_steps))\n    print(\"Warmup steps: {}\".format(warmup_steps))\n\n    for epoch in range(int(config.epochs)):\n        print(epoch)\n        if (epoch + 1) % config.save_every_epoch == 0:\n            path = config.save_dir + '/' + model.to_string() + '_epoch:%d' % (epoch + 1)\n            os.makedirs(path, exist_ok=True)\n            torch.save(model, path + '/model.pt')\n            pdb.set_trace()\n            f1, pre, recall = evaluate(config, epoch + 1, model, all_train_data, all_train_labels, all_train_timestamps,\n                                       all_test_data, all_test_labels, all_test_timestamps, delay)\n            print('epoch:%d\\tf1:%f\\tp:%f\\tr:%f' % (epoch + 1, f1, pre, recall))\n\n        model.zero_grad()\n        for step, batch in enumerate(train_dataloader):\n            batch = batch[0]\n            model(batch)\n            min_loss = model.min_loss(batch)\n            max_loss = 
model.max_loss(batch)\n            print('minloss:%f\\tmaxloss:%f' % (min_loss.detach().cpu(),max_loss.detach().cpu()))\n            optimizer.zero_grad()\n            min_loss.backward(retain_graph=True)\n            max_loss.backward()\n            optimizer.step()\n            scheduler.step()\n\n\ndef np_shift(arr, num, fill_value=np.nan):\n    result = np.empty_like(arr)\n    if num > 0:\n        result[:num] = fill_value\n        result[num:] = arr[:-num]\n    elif num < 0:\n        result[num:] = fill_value\n        result[:num] = arr[-num:]\n    else:\n        result[:] = arr\n    return result\n\n\n# set missing = 0\ndef reconstruct_label(timestamp, label):\n    timestamp = np.asarray(timestamp, np.int64)\n    index = np.argsort(timestamp)\n\n    timestamp_sorted = np.asarray(timestamp[index])\n    interval = np.min(np.diff(timestamp_sorted))\n\n    label = np.asarray(label, np.int64)\n    label = np.asarray(label[index])\n\n    idx = (timestamp_sorted - timestamp_sorted[0]) // interval\n\n    new_label = np.zeros(shape=((timestamp_sorted[-1] - timestamp_sorted[0]) // interval + 1,), dtype=np.int64)\n    new_label[idx] = label\n\n    return new_label\n\n\ndef eval_ad_result(test_pred_list, test_labels_list, test_timestamps_list, delay):\n    labels = []\n    pred = []\n    for test_pred, test_labels, test_timestamps in zip(test_pred_list, test_labels_list, test_timestamps_list):\n        assert test_pred.shape == test_labels.shape == test_timestamps.shape\n        test_labels = reconstruct_label(test_timestamps, test_labels)\n        test_pred = reconstruct_label(test_timestamps, test_pred)\n        test_pred = get_range_proba(test_pred, test_labels, delay)\n        labels.append(test_labels)\n        pred.append(test_pred)\n    labels = np.concatenate(labels)\n    pred = np.concatenate(pred)\n    return {\n        'f1': f1_score(labels, pred),\n        'precision': precision_score(labels, pred),\n        'recall': recall_score(labels, pred)\n    }\n\n\ndef 
evaluate(config, cur_epoch, model, all_train_data, all_train_labels, all_train_timestamps, all_test_data,\n             all_test_labels, all_test_timestamps, delay):\n    res_log = []\n    labels_log = []\n    timestamps_log = []\n    t = time.time()\n    for k in all_train_data:\n        print(\"k = \", k)\n        train_data = all_train_data[k]\n        train_labels = all_train_labels[k]\n        train_timestamps = all_train_timestamps[k]\n        train_length = train_labels.shape[0]\n\n        test_data = all_test_data[k]\n        test_labels = all_test_labels[k]\n        test_timestamps = all_test_timestamps[k]\n        test_length = test_labels.shape[0]\n\n        train_err = model.anomaly_score_whole(train_data).detach().cpu().numpy()\n        test_err = model.anomaly_score_whole(test_data).detach().cpu().numpy()\n\n        train_err = train_err[:train_length]\n        test_err = test_err[:test_length]\n\n        ma = np_shift(bn.move_mean(np.concatenate([train_err, test_err]), 21), 1)\n        train_err_adj = (train_err - ma[:len(train_err)]) / ma[:len(train_err)]\n        test_err_adj = (test_err - ma[len(train_err):]) / ma[len(train_err):]\n        train_err_adj = train_err_adj[22:]\n\n        thr = np.mean(train_err_adj) + 4 * np.std(train_err_adj)\n        test_res = (test_err_adj > thr) * 1\n\n        for i in range(len(test_res)):\n            if i >= delay and test_res[i - delay:i].sum() >= 1:\n                test_res[i] = 0\n        res_log.append(test_res)\n        labels_log.append(test_labels)\n        timestamps_log.append(test_timestamps)\n\n        break\n\n    t = time.time() - t\n    eval_res = eval_ad_result(res_log, labels_log, timestamps_log, delay)\n    eval_res['infer_time'] = t\n    '''\n    eval_res:{'f1':,'p':,'r':,}\n    '''\n    '''save_results'''\n    path = config.save_dir + '/' + model.to_string() + '_epoch:%d' % (cur_epoch)\n    os.makedirs(path, exist_ok=True)\n    with open(path + '/res_log.pkl', 'wb') as f:\n        
pickle.dump(res_log, f)\n    with open(path + '/eval_res.pkl', 'wb') as f:\n        pickle.dump(eval_res, f)\n    with open(path + '/results.txt', 'w') as f:\n        f.write('f1:%f\\tp:%f\\tr:%f\\n' % (eval_res['f1'], eval_res['precision'], eval_res['recall']))\n\n    return eval_res['f1'], eval_res['precision'], eval_res['recall']\n\n\ndef main(config):\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--dataset', default='kpi',\n                        help='The dataset name, yahoo, kpi')  ##  SMD, MSL, SMAP, PSM, SWAT, NIPS_TS_Swan, UCR, NIPS_TS_Water\n    parser.add_argument('--is_multi', default=False, help='The dataset name, yahoo, kpi')\n    parser.add_argument('--datapath', default='./datasets/', help='')\n    parser.add_argument('--index', type=int, default=143, help='')\n    parser.add_argument('--batch_size', type=int, default=32, help='The batch size (defaults to 8)')\n    parser.add_argument('--save_dir', type=str, default='/dev_data/lz/tsm_ptms_anomaly_detection/result/')\n    parser.add_argument('--save_csv_name', type=str, default='anomaly_transformer_0719.csv')\n    args = parser.parse_args()\n\n    config.dataset_name = args.dataset\n\n    if args.is_multi:\n        from datasets.data_loader import get_loader_segment\n\n        data_path = args.datapath + args.dataset + '/'\n        print(\"data_path = \", data_path)\n        _, train_data_loader = get_loader_segment(args.index, data_path, args.batch_size, win_size=100, step=100,\n                                                  mode='train',\n                                                  dataset=args.dataset)\n\n        all_train_data = train_data_loader.train\n        all_train_labels = None\n        all_train_timestamps = None\n        all_test_data = train_data_loader.test\n        all_test_labels = train_data_loader.test_labels\n        all_test_timestamps = None\n        delay = 5\n\n        print(\"all_train_data test_data, test_labels.shape = \", all_train_data.shape, 
all_test_data.shape,\n              all_test_labels.shape)\n        all_train_data = np.expand_dims(all_train_data, axis=0)\n        print(\"train_data.shape = \", all_train_data.shape)\n        print(\"Read Success!!!\")\n        config.in_channel = all_train_data.shape[-1]\n    else:\n        all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay = datautils.load_anomaly(\n            args.dataset)\n\n        # i = 1\n        # for k in all_test_data:\n        #     print(\"i = \", i, \", k = \", k)\n        #     print(\"all_train_data.shape = \", all_train_data[k].shape)\n        #     print(\"all_train_labels.shape = \", all_train_labels[k].shape)\n        #     print(\"all_train_timestamps.shape = \", all_train_timestamps[k].shape)\n        #     print(\"all_test_data.shape = \", all_test_data[k].shape)\n        #     print(\"all_test_labels.shape = \", all_test_labels[k].shape)\n        #     print(\"all_test_timestamps.shape = \", all_test_timestamps[k].shape)\n        #     i = i + 1\n        # if i > 2:\n        #     break\n        # all_train_data = datautils.gen_ano_train_data(all_train_data)\n        # print(\"train_data.shape = \", all_train_data.shape)\n\n    # all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay = datautils.load_anomaly(\n    #     config.dataset_name)\n\n\n\n    print('data loaded!')\n    model = AnomalyTransformer(config.batch_size, config.window_size, config.in_channel, config.d_model, config.layers,\n                               config.lambda_)\n    model = model.cuda()\n    print('model builded!')\n    print('train start!')\n    if config.is_train:\n        model.train()\n        train(config, model, all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels,\n              all_test_timestamps, delay)\n        '''save_trained_model'''\n        path = config.save_dir + '/' + 
model.to_string() + '_epoch:%d' % (config.epochs)\n        os.makedirs(path, exist_ok=True)\n        torch.save(model, path + '/model.pt')\n\n    print('train finished! evaluating...')\n    if config.is_eval:\n        model.eval()\n        res_log, eval_res = evaluate(config, config.epochs, model, all_train_data, all_train_labels,\n                                     all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay)\n\n        print(\"res_log = \", res_log, \", eval_res = \", eval_res)\n\n    print('evaluate finished!')\n\n\nif __name__ == \"__main__\":\n    config = Config()\n    main(config)\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/train_at_multi.py",
    "content": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\nimport os\nimport argparse\n\nfrom torch.backends import cudnn\nfrom other_anomaly_baselines.datasets.data_loader import get_loader_segment\nfrom other_anomaly_baselines.AT_solver import Solver, mkdir\nimport torch.multiprocessing as mp\nimport numpy as np\n\n# 更改共享策略\nmp.set_sharing_strategy('file_system')\n\n\n\ndef str2bool(v):\n    return v.lower() in ('true')\n\n\ndef main(config, train_set, train_loader, val_set, val_loader, test_set, test_loader, dev_cuda):\n    cudnn.benchmark = True\n    if (not os.path.exists(config.model_save_path)):\n        mkdir(config.model_save_path)\n    solver = Solver(vars(config), train_set, train_loader, val_set, val_loader, test_set, test_loader, dev_cuda)\n\n    # if config.mode == 'train':\n    solver.train()\n    # elif config.mode == 'test':\n    eval_res = solver.test(ucr_index=config.index)\n\n    print(\"result_dict = \", eval_res)\n\n    eval_res['dataset'] = config.dataset + str(config.index)\n    import pandas as pd\n\n    # 转换字典为 DataFrame\n    df = pd.DataFrame([eval_res])\n    # 指定保存路径\n    save_path = config.save_dir + config.save_csv_name\n\n    # 转换字典为 DataFrame\n    df_new = pd.DataFrame([eval_res])\n\n    # 检查文件是否存在\n    if os.path.exists(save_path):\n        # 文件存在，读取现有数据\n        df_existing = pd.read_csv(save_path, index_col=0)\n        # 将新数据附加到现有数据框中\n        df_combined = pd.concat([df_existing, df_new], ignore_index=True)\n    else:\n        # 文件不存在，创建新的数据框\n        df_combined = df_new\n\n    # 保存 DataFrame 为 CSV 文件\n    df_combined.to_csv(save_path, index=True, index_label=\"id\")\n\n    return solver\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n\n    parser.add_argument('--lr', type=float, default=1e-4)\n    parser.add_argument('--num_epochs', type=int, default=3)\n    parser.add_argument('--k', type=int, 
default=3)\n    parser.add_argument('--win_size', type=int, default=100)\n    parser.add_argument('--input_c', type=int, default=38)\n    parser.add_argument('--output_c', type=int, default=38)\n    parser.add_argument('--batch_size', type=int, default=32)\n    parser.add_argument('--pretrained_model', type=str, default=None)\n    parser.add_argument('--dataset', type=str, default='UCR')    ##  SMD, MSL, SMAP, PSM, SWAT, NIPS_TS_Swan, UCR, NIPS_TS_Water, UCR\n    parser.add_argument('--mode', type=str, default='train', choices=['train', 'test'])\n    # parser.add_argument('--data_path', type=str, default='./dataset/creditcard_ts.csv')\n    parser.add_argument('--data_path', type=str, default='datasets/')\n    parser.add_argument('--model_save_path', type=str, default='checkpoints')\n    parser.add_argument('--anormly_ratio', type=float, default=0.9)\n    parser.add_argument('--index', type=int, default=143, help='')\n    parser.add_argument('--cuda', type=str, default='cuda:0')\n\n    parser.add_argument('--save_dir', type=str, default='/dev_data/lz/tsm_ptms_anomaly_detection/result/')\n    parser.add_argument('--save_csv_name', type=str, default='at_ucr_0727.csv')\n\n    config = parser.parse_args()\n\n    # 检查路径是否存在，如果不存在则赋值为新的路径\n    if not os.path.exists(config.save_dir):\n        config.save_dir = '/SSD/lz/tsm_ptms_anomaly_detection/result/'\n\n    print(\"save_dir = \", config.save_dir)  # 输出检查\n\n    train_loader, train_set = get_loader_segment(config.index, config.data_path + config.dataset, batch_size=config.batch_size,\n                                                 win_size=config.win_size, mode='train', dataset=config.dataset)\n    val_loader, val_set = get_loader_segment(config.index, config.data_path + config.dataset, batch_size=config.batch_size,\n                                             win_size=config.win_size, mode='val', dataset=config.dataset)\n    test_loader, test_set = get_loader_segment(config.index, config.data_path + config.dataset, 
batch_size=config.batch_size,\n                                               win_size=config.win_size, mode='test', dataset=config.dataset)\n    train_set = train_set.train\n    config.input_c = train_set.shape[-1]\n    config.output_c = train_set.shape[-1]\n\n    args = vars(config)\n\n    print('------------ Options -------------')\n    for k, v in sorted(args.items()):\n        print('%s: %s' % (str(k), str(v)))\n    print('-------------- End ----------------')\n    main(config, train_set, train_loader, val_set, val_loader, test_set, test_loader, config.cuda)\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/train_at_uni.py",
    "content": "import os\nimport sys\n\nimport numpy as np\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\nimport os\nimport argparse\nfrom torch.utils.data import TensorDataset, DataLoader\nimport torch\nfrom torch.backends import cudnn\nfrom other_anomaly_baselines.datasets.data_loader import get_loader_segment\nfrom other_anomaly_baselines.AT_solver import Solver, mkdir\nimport datautils\nimport numpy as np\nimport torch.multiprocessing as mp\nmp.set_sharing_strategy('file_system')\n\n\n\nclass UniLoader(object):\n    def __init__(self, data_set, win_size, step, mode=\"train\"):\n        self.mode = mode\n        self.step = step\n        self.win_size = win_size\n\n        self.train = data_set\n\n\n    def __len__(self):\n        \"\"\"\n        Number of images in the object dataset.\n        \"\"\"\n\n        return (self.train.shape[0] - self.win_size) // self.step + 1\n\n\n    def __getitem__(self, index):\n        index = index * self.step\n\n        return np.float32(self.train[index:index + self.win_size])\n\n\ndef str2bool(v):\n    return v.lower() in ('true')\n\n\ndef main(config, train_set, train_loader, val_set, val_loader, test_set, test_loader, dev_cuda, all_train_data, all_test_data, all_test_labels, all_test_timestamps, delay, train_data):\n    cudnn.benchmark = True\n    if (not os.path.exists(config.model_save_path)):\n        mkdir(config.model_save_path)\n\n    for i in range(train_data.shape[0]):\n        print(\"i = \", i, \", total num = \", train_data.shape[0])\n        print(\"train_data.shape = \", train_data.shape)\n        _train_data = train_data[i]\n        print(\"000train_data.shape = \", train_data.shape, type(train_data))\n        _train_data = np.array(_train_data)\n        print(\"111_train_data.shape = \", _train_data.shape, type(_train_data))\n\n        train_dataset = UniLoader(_train_data, config.win_size, 1)\n\n        train_loader = 
DataLoader(dataset=train_dataset,\n                                  batch_size=config.batch_size,\n                                  shuffle=True,\n                                  num_workers=2,\n                                  drop_last=True)\n\n        solver = Solver(vars(config), train_dataset, train_loader, val_set, val_loader, test_set, test_loader, dev_cuda)\n\n        break\n\n    # if config.mode == 'train':\n    # for _uni_train_set in train_set:\n\n    # solver.train_uni()\n    # elif config.mode == 'test':\n    eval_res = solver.test_uni(all_train_data, all_test_data, all_test_labels, all_test_timestamps, delay, config)\n\n    print(\"result_dict = \", eval_res)\n\n    eval_res['dataset'] = config.dataset + str(config.index)\n    import pandas as pd\n\n    # 转换字典为 DataFrame\n    df = pd.DataFrame([eval_res])\n    # 指定保存路径\n    save_path = config.save_dir + config.save_csv_name\n\n    # 转换字典为 DataFrame\n    df_new = pd.DataFrame([eval_res])\n\n    # 检查文件是否存在\n    if os.path.exists(save_path):\n        # 文件存在，读取现有数据\n        df_existing = pd.read_csv(save_path, index_col=0)\n        # 将新数据附加到现有数据框中\n        df_combined = pd.concat([df_existing, df_new], ignore_index=True)\n    else:\n        # 文件不存在，创建新的数据框\n        df_combined = df_new\n\n    # 保存 DataFrame 为 CSV 文件\n    df_combined.to_csv(save_path, index=True, index_label=\"id\")\n\n    return solver\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n\n    parser.add_argument('--lr', type=float, default=1e-4)\n    parser.add_argument('--num_epochs', type=int, default=1)\n    parser.add_argument('--k', type=int, default=3)\n    parser.add_argument('--win_size', type=int, default=100)\n    parser.add_argument('--input_c', type=int, default=38)\n    parser.add_argument('--output_c', type=int, default=38)\n    parser.add_argument('--batch_size', type=int, default=32)\n    parser.add_argument('--pretrained_model', type=str, default=None)\n    parser.add_argument('--dataset', 
type=str, default='yahoo')    ##  kpi, yahoo\n    parser.add_argument('--mode', type=str, default='train', choices=['train', 'test'])\n    # parser.add_argument('--data_path', type=str, default='./dataset/creditcard_ts.csv')\n    parser.add_argument('--data_path', type=str, default='datasets/')\n    parser.add_argument('--model_save_path', type=str, default='checkpoints')\n    parser.add_argument('--anormly_ratio', type=float, default=1.0)\n    parser.add_argument('--index', type=int, default=143, help='')\n    parser.add_argument('--cuda', type=str, default='cuda:0')\n\n    parser.add_argument('--save_dir', type=str, default='/dev_data/lz/tsm_ptms_anomaly_detection/result/')\n    parser.add_argument('--save_csv_name', type=str, default='at_uni_0722.csv')\n\n    config = parser.parse_args()\n\n    # 检查路径是否存在，如果不存在则赋值为新的路径\n    if not os.path.exists(config.save_dir):\n        config.save_dir = '/SSD/lz/tsm_ptms_anomaly_detection/result/'\n\n    print(\"save_dir = \", config.save_dir)  # 输出检查\n\n    dataset = 'MSL'\n    _train_loader, _train_set = get_loader_segment(config.index, config.data_path + dataset,\n                                                 batch_size=config.batch_size,\n                                                 win_size=config.win_size, mode='train', dataset=dataset)\n\n    _train_set = _train_set.train\n\n    print(\"_train_set.shape = \", _train_set.shape)\n\n    all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay = datautils.load_anomaly(\n        config.dataset)\n    train_data = datautils.gen_ano_train_data(all_train_data)\n\n    print(\"train_data.shape = \", train_data.shape)\n    _train_data = train_data[0]\n    print(\"000train_data.shape = \", train_data.shape, type(train_data))\n    _train_data = np.array(_train_data)\n    print(\"111_train_data.shape = \", _train_data.shape, type(_train_data))\n\n    train_dataset = UniLoader(_train_data, config.win_size, 1)\n\n    
train_loader = DataLoader(dataset=train_dataset,\n                             batch_size=config.batch_size,\n                             shuffle=True,\n                             num_workers=2,\n                             drop_last=True)\n\n    # train_dataset = TensorDataset(torch.from_numpy(train_data).to(torch.float))\n    # train_loader = DataLoader(train_dataset, batch_size=config.batch_size, shuffle=True,\n    #                           drop_last=True)\n\n    val_loader = train_loader\n\n    config.input_c = train_data.shape[-1]\n    config.output_c = train_data.shape[-1]\n\n    args = vars(config)\n\n    print('------------ Options -------------')\n    for k, v in sorted(args.items()):\n        print('%s: %s' % (str(k), str(v)))\n    print('-------------- End ----------------')\n    main(config, train_dataset, train_loader, train_dataset, val_loader, train_dataset, val_loader, config.cuda, all_train_data, all_test_data,\n         all_test_labels, all_test_timestamps, delay, train_data)\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/train_dcdetector.py",
    "content": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\nimport os\nimport argparse\nimport numpy as np\nimport torch\nfrom torch.autograd import Variable\nfrom torch.backends import cudnn\nfrom other_anomaly_baselines.dcdetector_solver import Solver\nimport time\nimport warnings\nimport sys\nfrom other_anomaly_baselines.datasets.data_loader import get_loader_segment\n\nwarnings.filterwarnings('ignore')\n\n\ndef to_var(x, volatile=False):\n    if torch.cuda.is_available():\n        x = x.cuda()\n    return Variable(x, volatile=volatile)\n\n\ndef mkdir(directory):\n    if not os.path.exists(directory):\n        os.makedirs(directory)\n\n\n\nclass Logger(object):\n    def __init__(self, filename='default.log', add_flag=True, stream=sys.stdout):\n        self.terminal = stream\n        self.filename = filename\n        self.add_flag = add_flag\n\n    def write(self, message):\n        if self.add_flag:\n            with open(self.filename, 'a+') as log:\n                self.terminal.write(message)\n                log.write(message)\n        else:\n            with open(self.filename, 'w') as log:\n                self.terminal.write(message)\n                log.write(message)\n\n    def flush(self):\n        pass\n\n\ndef str2bool(v):\n    return v.lower() in ('true')\n\n\ndef find_nearest(array, value):\n    array = np.asarray(array)\n    idx = (np.abs(array - value)).argmin()\n    return int(array[idx - 1])\n\n\ndef main(config):\n    cudnn.benchmark = True\n    if (not os.path.exists(config.model_save_path)):\n        mkdir(config.model_save_path)\n    solver = Solver(vars(config))\n    solver.train()\n    result_dict = solver.test(ucr_index=config.index)\n    # if config.mode == 'train':\n    #     solver.train()\n    # elif config.mode == 'test':\n    #     solver.test()\n\n    return result_dict\n\n\nif __name__ == '__main__':\n    parser = 
argparse.ArgumentParser()\n\n    # Alternative\n    parser.add_argument('--win_size', type=int, default=100)\n    parser.add_argument('--patch_size', type=list, default=[5])\n    parser.add_argument('--lr', type=float, default=1e-4)\n    parser.add_argument('--loss_fuc', type=str, default='MSE')\n    parser.add_argument('--n_heads', type=int, default=1)\n    parser.add_argument('--e_layers', type=int, default=3)\n    parser.add_argument('--d_model', type=int, default=256)\n    parser.add_argument('--rec_timeseries', action='store_true', default=True)\n\n    parser.add_argument('--use_gpu', type=bool, default=True, help='use gpu')\n    parser.add_argument('--gpu', type=int, default=1, help='gpu')\n    parser.add_argument('--use_multi_gpu', action='store_true', help='use multiple gpus', default=False)\n    parser.add_argument('--devices', type=str, default='0,1,2,3', help='device ids of multile gpus')\n\n    # Default\n    parser.add_argument('--index', type=int, default=71)\n    parser.add_argument('--num_epochs', type=int, default=3)\n    parser.add_argument('--batch_size', type=int, default=128)\n    parser.add_argument('--input_c', type=int, default=1)\n    parser.add_argument('--output_c', type=int, default=1)\n    parser.add_argument('--k', type=int, default=3)\n    parser.add_argument('--dataset', type=str, default='UCR') ## NIPS_TS_Swan  SMD  ##  SMD, MSL, SMAP, PSM, SWAT, NIPS_TS_Swan, UCR, NIPS_TS_Water\n    parser.add_argument('--mode', type=str, default='train', choices=['train', 'test'])\n    parser.add_argument('--data_path', type=str, default='datasets/')\n    parser.add_argument('--model_save_path', type=str, default='checkpoints')\n\n    parser.add_argument('--anormly_ratio', type=float, default=1.00)\n\n    parser.add_argument('--save_dir', type=str, default='/dev_data/lz/tsm_ptms_anomaly_detection/result/')\n    parser.add_argument('--save_csv_name', type=str, default='dcdetector_ucr_0728.csv')\n\n    config = parser.parse_args()\n    args = 
vars(config)\n    config.patch_size = [int(patch_index) for patch_index in config.patch_size]\n\n    if config.dataset == 'UCR':\n        batch_size_buffer = [2, 4, 8, 16, 32, 64, 128, 256]\n        data_len = np.load(config.data_path + config.dataset + \"/UCR_\" + str(config.index) + \"_train.npy\").shape[0]   ## './datasets/' +\n        config.batch_size = find_nearest(batch_size_buffer, data_len / config.win_size)\n    elif config.dataset == 'UCR_AUG':\n        batch_size_buffer = [2, 4, 8, 16, 32, 64, 128, 256]\n        data_len = np.load('./datasets/' + config.data_path + \"/UCR_AUG_\" + str(config.index) + \"_train.npy\").shape[0]\n        config.batch_size = find_nearest(batch_size_buffer, data_len / config.win_size)\n    elif config.dataset == 'SMD_Ori':\n        batch_size_buffer = [2, 4, 8, 16, 32, 64, 128, 256, 512]\n        data_len = np.load('./datasets/' + config.data_path + \"/SMD_Ori_\" + str(config.index) + \"_train.npy\").shape[0]\n        config.batch_size = find_nearest(batch_size_buffer, data_len / config.win_size)\n\n    config.use_gpu = True if torch.cuda.is_available() and config.use_gpu else False\n    if config.use_gpu and config.use_multi_gpu:\n        config.devices = config.devices.replace(' ', '')\n        device_ids = config.devices.split(',')\n        config.device_ids = [int(id_) for id_ in device_ids]\n        config.gpu = config.device_ids[0]\n\n    sys.stdout = Logger(\"./result_log/\" + config.dataset + \".log\", sys.stdout)\n    if config.mode == 'train':\n        print(\"\\n\\n\")\n        print(time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\n        print('================ Hyperparameters ===============')\n        for k, v in sorted(args.items()):\n            print('%s: %s' % (str(k), str(v)))\n        print('====================  Train  ===================')\n\n    train_loader, train_set = get_loader_segment(config.index, config.data_path + config.dataset, batch_size=config.batch_size,\n                           
                      win_size=config.win_size, mode='train', dataset=config.dataset)\n\n    train_set = train_set.train\n\n\n    print(\"train_set.shape = \", train_set.shape)\n    config.input_c = train_set.shape[-1]\n    config.output_c = train_set.shape[-1]\n\n    eval_res = main(config)\n    print(\"result_dict = \", eval_res)\n\n    eval_res['dataset'] = config.dataset + str(config.index)\n    import pandas as pd\n\n    # 转换字典为 DataFrame\n    df = pd.DataFrame([eval_res])\n    # 指定保存路径\n    save_path = config.save_dir + config.save_csv_name\n\n    # 转换字典为 DataFrame\n    df_new = pd.DataFrame([eval_res])\n\n    # 检查文件是否存在\n    if os.path.exists(save_path):\n        # 文件存在，读取现有数据\n        df_existing = pd.read_csv(save_path, index_col=0)\n        # 将新数据附加到现有数据框中\n        df_combined = pd.concat([df_existing, df_new], ignore_index=True)\n    else:\n        # 文件不存在，创建新的数据框\n        df_combined = df_new\n\n    # 保存 DataFrame 为 CSV 文件\n    df_combined.to_csv(save_path, index=True, index_label=\"id\")\n\n\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/train_dcdetector_nui.py",
    "content": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\nimport os\nimport argparse\nimport numpy as np\nimport torch\nfrom torch.autograd import Variable\nfrom torch.backends import cudnn\nfrom other_anomaly_baselines.dcdetector_solver import Solver\nimport time\nimport warnings\nimport sys\nfrom other_anomaly_baselines.datasets.data_loader import get_loader_segment\n\nimport datautils\nfrom torch.utils.data import TensorDataset, DataLoader\n\n\nimport torch.multiprocessing as mp\n\nmp.set_sharing_strategy('file_system')\n\nwarnings.filterwarnings('ignore')\n\n\nclass UniLoader(object):\n    def __init__(self, data_set, win_size, step, mode=\"train\"):\n        self.mode = mode\n        self.step = step\n        self.win_size = win_size\n\n        self.train = data_set\n\n\n    def __len__(self):\n        \"\"\"\n        Number of images in the object dataset.\n        \"\"\"\n\n        return (self.train.shape[0] - self.win_size) // self.step + 1\n\n\n    def __getitem__(self, index):\n        index = index * self.step\n\n        return np.float32(self.train[index:index + self.win_size])\n\n\n\ndef to_var(x, volatile=False):\n    if torch.cuda.is_available():\n        x = x.cuda()\n    return Variable(x, volatile=volatile)\n\n\ndef mkdir(directory):\n    if not os.path.exists(directory):\n        os.makedirs(directory)\n\n\n\nclass Logger(object):\n    def __init__(self, filename='default.log', add_flag=True, stream=sys.stdout):\n        self.terminal = stream\n        self.filename = filename\n        self.add_flag = add_flag\n\n    def write(self, message):\n        if self.add_flag:\n            with open(self.filename, 'a+') as log:\n                self.terminal.write(message)\n                log.write(message)\n        else:\n            with open(self.filename, 'w') as log:\n                self.terminal.write(message)\n                
log.write(message)\n\n    def flush(self):\n        pass\n\n\ndef str2bool(v):\n    return v.lower() in ('true')\n\n\ndef find_nearest(array, value):\n    array = np.asarray(array)\n    idx = (np.abs(array - value)).argmin()\n    return int(array[idx - 1])\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n\n    # Alternative\n    parser.add_argument('--win_size', type=int, default=100)\n    parser.add_argument('--patch_size', type=list, default=[5])\n    parser.add_argument('--lr', type=float, default=1e-4)\n    parser.add_argument('--loss_fuc', type=str, default='MSE')\n    parser.add_argument('--n_heads', type=int, default=1)\n    parser.add_argument('--e_layers', type=int, default=3)\n    parser.add_argument('--d_model', type=int, default=256)\n    parser.add_argument('--rec_timeseries', action='store_true', default=True)\n\n    parser.add_argument('--use_gpu', type=bool, default=True, help='use gpu')\n    parser.add_argument('--gpu', type=int, default=1, help='gpu')\n    parser.add_argument('--use_multi_gpu', action='store_true', help='use multiple gpus', default=False)\n    parser.add_argument('--devices', type=str, default='0,1,2,3', help='device ids of multile gpus')\n\n    # Default\n    parser.add_argument('--index', type=int, default=137)\n    parser.add_argument('--num_epochs', type=int, default=3)\n    parser.add_argument('--batch_size', type=int, default=8)\n    parser.add_argument('--input_c', type=int, default=1)\n    parser.add_argument('--output_c', type=int, default=1)\n    parser.add_argument('--k', type=int, default=3)\n    # parser.add_argument('--dataset', type=str, default='NIPS_TS_Swan') ## NIPS_TS_Swan  SMD\n    parser.add_argument('--dataset', type=str, default='yahoo')  ##  kpi, yahoo\n    parser.add_argument('--mode', type=str, default='train', choices=['train', 'test'])\n    parser.add_argument('--data_path', type=str, default='datasets/')\n    parser.add_argument('--model_save_path', type=str, 
default='checkpoints')\n\n    parser.add_argument('--anormly_ratio', type=float, default=1.00)\n\n    parser.add_argument('--save_dir', type=str, default='/dev_data/lz/tsm_ptms_anomaly_detection/result/')\n    parser.add_argument('--save_csv_name', type=str, default='dcdetector_uni_0722.csv')\n\n    config = parser.parse_args()\n    args = vars(config)\n    config.patch_size = [int(patch_index) for patch_index in config.patch_size]\n\n    # 检查路径是否存在，如果不存在则赋值为新的路径\n    if not os.path.exists(config.save_dir):\n        config.save_dir = '/SSD/lz/tsm_ptms_anomaly_detection/result/'\n\n    print(\"save_dir = \", config.save_dir)  # 输出检查\n\n    # if config.dataset == 'UCR':\n    #     batch_size_buffer = [2, 4, 8, 16, 32, 64, 128, 256]\n    #     data_len = np.load(config.data_path + config.dataset + \"/UCR_\" + str(config.index) + \"_train.npy\").shape[0]   ## './datasets/' +\n    #     config.batch_size = find_nearest(batch_size_buffer, data_len / config.win_size)\n    # elif config.dataset == 'UCR_AUG':\n    #     batch_size_buffer = [2, 4, 8, 16, 32, 64, 128, 256]\n    #     data_len = np.load('./datasets/' + config.data_path + \"/UCR_AUG_\" + str(config.index) + \"_train.npy\").shape[0]\n    #     config.batch_size = find_nearest(batch_size_buffer, data_len / config.win_size)\n    # elif config.dataset == 'SMD_Ori':\n    #     batch_size_buffer = [2, 4, 8, 16, 32, 64, 128, 256, 512]\n    #     data_len = np.load('./datasets/' + config.data_path + \"/SMD_Ori_\" + str(config.index) + \"_train.npy\").shape[0]\n    #     config.batch_size = find_nearest(batch_size_buffer, data_len / config.win_size)\n\n    config.use_gpu = True if torch.cuda.is_available() and config.use_gpu else False\n    if config.use_gpu and config.use_multi_gpu:\n        config.devices = config.devices.replace(' ', '')\n        device_ids = config.devices.split(',')\n        config.device_ids = [int(id_) for id_ in device_ids]\n        config.gpu = config.device_ids[0]\n\n    sys.stdout = 
Logger(\"./result_log/\" + config.dataset + \".log\", sys.stdout)\n    if config.mode == 'train':\n        print(\"\\n\\n\")\n        print(time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\n        print('================ Hyperparameters ===============')\n        for k, v in sorted(args.items()):\n            print('%s: %s' % (str(k), str(v)))\n        print('====================  Train  ===================')\n\n    # train_loader, train_set = get_loader_segment(config.index, config.data_path + config.dataset, batch_size=config.batch_size,\n    #                                              win_size=config.win_size, mode='train', dataset=config.dataset)\n    #\n    # train_set = train_set.train\n    #\n    #\n    # print(\"train_set.shape = \", train_set.shape)\n    # config.input_c = train_set.shape[-1]\n    # config.output_c = train_set.shape[-1]\n\n    all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay = datautils.load_anomaly(\n        config.dataset)\n    train_data = datautils.gen_ano_train_data(all_train_data)\n\n    print(\"train_data.shape = \", train_data.shape)\n    _train_data = train_data[0]\n    print(\"000train_data.shape = \", train_data.shape, type(train_data))\n    _train_data = np.array(_train_data)\n    print(\"111_train_data.shape = \", _train_data.shape, type(_train_data))\n\n    train_dataset = UniLoader(_train_data, config.win_size, 1)\n\n    train_loader = DataLoader(dataset=train_dataset,\n                              batch_size=config.batch_size,\n                              shuffle=True,\n                              num_workers=2,\n                              drop_last=True)\n\n    val_loader = train_loader\n\n    config.input_c = train_data.shape[-1]\n    config.output_c = train_data.shape[-1]\n\n    cudnn.benchmark = True\n    if (not os.path.exists(config.model_save_path)):\n        mkdir(config.model_save_path)\n    solver = Solver(vars(config))\n\n    
solver.train_loader = train_loader\n    solver.vali_loader = val_loader\n    solver.test_loader = val_loader\n    solver.thre_loader = val_loader\n\n    solver.train_uni()\n    eval_res = solver.test_uni(all_train_data, all_test_data, all_test_labels, all_test_timestamps, delay, config)\n\n    print(\"result_dict = \", eval_res)\n\n    eval_res['dataset'] = config.dataset + str(config.index)\n    import pandas as pd\n\n    # 转换字典为 DataFrame\n    df = pd.DataFrame([eval_res])\n    # 指定保存路径\n    save_path = config.save_dir + config.save_csv_name\n\n    # 转换字典为 DataFrame\n    df_new = pd.DataFrame([eval_res])\n\n    # 检查文件是否存在\n    if os.path.exists(save_path):\n        # 文件存在，读取现有数据\n        df_existing = pd.read_csv(save_path, index_col=0)\n        # 将新数据附加到现有数据框中\n        df_combined = pd.concat([df_existing, df_new], ignore_index=True)\n    else:\n        # 文件不存在，创建新的数据框\n        df_combined = df_new\n\n    # 保存 DataFrame 为 CSV 文件\n    df_combined.to_csv(save_path, index=True, index_label=\"id\")\n\n\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/train_donut.py",
    "content": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\n\nimport torch\nimport numpy as np\nimport argparse\nimport os\nimport sys\nimport time\nimport datetime\nfrom donut import DONUT\nimport tasks\nimport datautils\nfrom utils import init_dl_program, name_with_datetime, pkl_save, data_dropout\n\ndef save_checkpoint_callback(\n    save_every=1,\n    unit='epoch'\n):\n    assert unit in ('epoch', 'iter')\n    def callback(model, loss):\n        n = model.n_epochs if unit == 'epoch' else model.n_iters\n        if n % save_every == 0:\n            model.save(f'{run_dir}/model_{n}.pkl')\n    return callback\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--dataset', default='kpi',\n                        help='The dataset name, yahoo, kpi')  ##  SMD, MSL, SMAP, PSM, SWAT, NIPS_TS_Swan, UCR, NIPS_TS_Water\n    parser.add_argument('--is_multi', default=False, help='The dataset name, yahoo, kpi')\n    parser.add_argument('--datapath', default='./datasets/', help='')\n    parser.add_argument('--index', type=int, default=143, help='')\n    parser.add_argument('--run_name', default='donut', help='The folder name used to save model, output and evaluation metrics. This can be set to any word')\n    # parser.add_argument('--loader', type=str, required=True, help='The data loader used to load the experimental data--anomaly')\n    parser.add_argument('--loader', type=str, default='anomaly',\n                        help='The data loader used to load the experimental data--anomaly')\n    parser.add_argument('--gpu', type=int, default=0, help='The gpu no. 
used for training and inference (defaults to 0)')\n    parser.add_argument('--batch-size', type=int, default=8, help='The batch size (defaults to 8)')\n    parser.add_argument('--lr', type=float, default=0.001, help='The learning rate (defaults to 0.001)')\n    parser.add_argument('--latent_dim', type=int, default=100, help='The units of the hidden layer.')\n    parser.add_argument('--hidden_dim', type=int, default=3, help='The dims of the hidden representation (z).')\n    parser.add_argument('--z_kld_weight', type=float, default=1)\n    parser.add_argument('--x_kld_weight', type=float, default=1)\n    parser.add_argument('--max-train-length', type=int, default=3000, help='For sequence with a length greater than <max_train_length>, it would be cropped into some sequences, each of which has a length less than <max_train_length> (defaults to 3000)')\n    parser.add_argument('--iters', type=int, default=None, help='The number of iterations')\n    parser.add_argument('--epochs', type=int, default=None, help='The number of epochs')\n    parser.add_argument('--save-every', type=int, default=None, help='Save the checkpoint every <save_every> iterations/epochs')\n    parser.add_argument('--seed', type=int, default=None, help='The random seed')\n    parser.add_argument('--max-threads', type=int, default=None, help='The maximum allowed number of threads used by this process')\n    parser.add_argument('--eval', default=True, help='Whether to perform evaluation after training')\n    parser.add_argument('--irregular', type=float, default=0, help='The ratio of missing observations (defaults to 0)')\n\n    parser.add_argument('--save_dir', type=str, default='/dev_data/lz/tsm_ptms_anomaly_detection/result/')\n    parser.add_argument('--save_csv_name', type=str, default='donut_uni_0723.csv')\n\n    args = parser.parse_args()\n    \n    print(\"Dataset:\", args.dataset)\n    print(\"Arguments:\", str(args))\n    \n    device = init_dl_program(args.gpu, seed=args.seed, 
max_threads=args.max_threads)\n    \n    print('Loading data... ', end='')\n    if args.loader == 'anomaly':\n        task_type = 'anomaly_detection'\n\n        if args.is_multi:\n            from datasets.data_loader import get_loader_segment\n\n            data_path = args.datapath + args.dataset + '/'\n            print(\"data_path = \", data_path)\n            _, train_data_loader = get_loader_segment(args.index, data_path, args.batch_size, win_size=100, step=100,\n                                                      mode='train',\n                                                      dataset=args.dataset)\n\n            all_train_data = train_data_loader.train\n            all_train_labels = None\n            all_train_timestamps = None\n            all_test_data = train_data_loader.test\n            all_test_labels = train_data_loader.test_labels\n            all_test_timestamps = None\n            delay = 5\n\n            print(\"all_train_data test_data, test_labels.shape = \", all_train_data.shape, all_test_data.shape,\n                  all_test_labels.shape)\n            train_data = np.expand_dims(all_train_data, axis=0)\n            print(\"train_data.shape = \", train_data.shape)\n            print(\"Read Success!!!\")\n        else:\n            all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay = datautils.load_anomaly(args.dataset)\n            train_data = datautils.gen_ano_train_data(all_train_data)\n    else:\n        raise ValueError(f\"Unknown loader {args.loader}.\")\n        \n        \n    if args.irregular > 0:\n        raise ValueError(f\"Task type {task_type} is not supported when irregular>0.\")\n    print('done')\n    \n    config = dict(\n        batch_size=args.batch_size,\n        lr=args.lr,\n        latent_dim=args.latent_dim,\n        hidden_dim=args.hidden_dim,\n        z_kld_weight=args.z_kld_weight,\n        x_kld_weight=args.x_kld_weight,\n        
max_train_length=args.max_train_length\n    )\n    \n    if args.save_every is not None:\n        unit = 'epoch' if args.epochs is not None else 'iter'\n        config[f'after_{unit}_callback'] = save_checkpoint_callback(args.save_every, unit)\n\n    run_dir = 'training/' + args.dataset + '__' + name_with_datetime(args.run_name)\n    os.makedirs(run_dir, exist_ok=True)\n    \n    t = time.time()\n    \n    model = DONUT(\n        input_dims=train_data.shape[-1],\n        device=device,\n        **config\n    )\n    loss_log = model.train(\n        train_data,\n        n_epochs=args.epochs,\n        n_iters=args.iters,\n        verbose=True\n    )\n    model.save(f'{run_dir}/model.pkl')\n\n    t = time.time() - t\n    print(f\"\\nTraining time: {datetime.timedelta(seconds=t)}\")\n    print(\"Training time(seconds): \", t)\n\n    if args.eval:\n        if task_type == 'anomaly_detection':\n            out, eval_res = model.evaluate(model, all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay, is_multi=args.is_multi)\n        else:\n            assert False\n        pkl_save(f'{run_dir}/out.pkl', out)\n        pkl_save(f'{run_dir}/eval_res.pkl', eval_res)\n        print('Evaluation result:', eval_res)\n\n        eval_res['dataset'] = args.dataset + str(args.index)\n        import pandas as pd\n\n        # 转换字典为 DataFrame\n        df = pd.DataFrame([eval_res])\n        # 指定保存路径\n        save_path = args.save_dir + args.save_csv_name\n\n        # 转换字典为 DataFrame\n        df_new = pd.DataFrame([eval_res])\n\n        # 检查文件是否存在\n        if os.path.exists(save_path):\n            # 文件存在，读取现有数据\n            df_existing = pd.read_csv(save_path, index_col=0)\n            # 将新数据附加到现有数据框中\n            df_combined = pd.concat([df_existing, df_new], ignore_index=True)\n        else:\n            # 文件不存在，创建新的数据框\n            df_combined = df_new\n\n        # 保存 DataFrame 为 CSV 文件\n        df_combined.to_csv(save_path, 
index=True, index_label=\"id\")\n\n    print(\"Finished.\")\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/train_donut_multi.py",
    "content": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\nimport torch\nimport numpy as np\nimport argparse\nimport os\nimport sys\nimport time\nimport datetime\nfrom donut import DONUT\nimport tasks\nimport datautils\nfrom utils import init_dl_program, name_with_datetime, pkl_save, data_dropout\n\n\ndef save_checkpoint_callback(\n        save_every=1,\n        unit='epoch'\n):\n    assert unit in ('epoch', 'iter')\n\n    def callback(model, loss):\n        n = model.n_epochs if unit == 'epoch' else model.n_iters\n        if n % save_every == 0:\n            model.save(f'{run_dir}/model_{n}.pkl')\n\n    return callback\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--dataset', default='PSM',\n                        help='The dataset name, yahoo, kpi')  ##  SMD, MSL, SMAP, PSM, SWAT, NIPS_TS_Swan, UCR, NIPS_TS_Water\n    parser.add_argument('--is_multi', default=True, help='The dataset name, yahoo, kpi')\n    parser.add_argument('--datapath', default='./datasets/', help='')\n    parser.add_argument('--index', type=int, default=203, help='')\n    parser.add_argument('--run_name', default='donut',\n                        help='The folder name used to save model, output and evaluation metrics. This can be set to any word')\n    # parser.add_argument('--loader', type=str, required=True, help='The data loader used to load the experimental data--anomaly')\n    parser.add_argument('--loader', type=str, default='anomaly',\n                        help='The data loader used to load the experimental data--anomaly')\n    parser.add_argument('--gpu', type=int, default=0,\n                        help='The gpu no. 
used for training and inference (defaults to 0)')\n    parser.add_argument('--batch-size', type=int, default=8, help='The batch size (defaults to 8)')\n    parser.add_argument('--lr', type=float, default=0.001, help='The learning rate (defaults to 0.001)')\n    parser.add_argument('--latent_dim', type=int, default=100, help='The units of the hidden layer.')\n    parser.add_argument('--hidden_dim', type=int, default=3, help='The dims of the hidden representation (z).')\n    parser.add_argument('--z_kld_weight', type=float, default=1)\n    parser.add_argument('--x_kld_weight', type=float, default=1)\n    parser.add_argument('--max-train-length', type=int, default=3000,\n                        help='For sequence with a length greater than <max_train_length>, it would be cropped into some sequences, each of which has a length less than <max_train_length> (defaults to 3000)')\n    parser.add_argument('--iters', type=int, default=None, help='The number of iterations')\n    parser.add_argument('--epochs', type=int, default=None, help='The number of epochs')\n    parser.add_argument('--save-every', type=int, default=None,\n                        help='Save the checkpoint every <save_every> iterations/epochs')\n    parser.add_argument('--seed', type=int, default=None, help='The random seed')\n    parser.add_argument('--max-threads', type=int, default=None,\n                        help='The maximum allowed number of threads used by this process')\n    parser.add_argument('--eval', default=True, help='Whether to perform evaluation after training')\n    parser.add_argument('--irregular', type=float, default=0, help='The ratio of missing observations (defaults to 0)')\n\n    parser.add_argument('--save_dir', type=str, default='/dev_data/lz/tsm_ptms_anomaly_detection/result/')\n    parser.add_argument('--save_csv_name', type=str, default='donut_ucr_0727.csv')\n\n    args = parser.parse_args()\n\n    print(\"Dataset:\", args.dataset)\n    print(\"Arguments:\", str(args))\n\n   
 # 检查路径是否存在，如果不存在则赋值为新的路径\n    if not os.path.exists(args.save_dir):\n        args.save_dir = '/SSD/lz/tsm_ptms_anomaly_detection/result/'\n\n    print(\"save_dir = \", args.save_dir)  # 输出检查\n\n    device = init_dl_program(args.gpu, seed=args.seed, max_threads=args.max_threads)\n\n    print('Loading data... ', end='')\n    if args.loader == 'anomaly':\n        task_type = 'anomaly_detection'\n\n        if args.is_multi:\n            from datasets.data_loader import get_loader_segment\n\n            data_path = args.datapath + args.dataset + '/'\n            print(\"data_path = \", data_path)\n            _, train_data_loader = get_loader_segment(args.index, data_path, args.batch_size, win_size=100, step=100,\n                                                      mode='train',\n                                                      dataset=args.dataset)\n\n            all_train_data = train_data_loader.train\n            all_train_labels = None\n            all_train_timestamps = None\n            all_test_data = train_data_loader.test\n            all_test_labels = train_data_loader.test_labels\n            all_test_timestamps = None\n            delay = 5\n\n            print(\"all_train_data test_data, test_labels.shape = \", all_train_data.shape, all_test_data.shape,\n                  all_test_labels.shape)\n            train_data = np.expand_dims(all_train_data, axis=0)\n            print(\"train_data.shape = \", train_data.shape)\n            print(\"Read Success!!!\")\n        else:\n            all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay = datautils.load_anomaly(\n                args.dataset)\n            train_data = datautils.gen_ano_train_data(all_train_data)\n    else:\n        raise ValueError(f\"Unknown loader {args.loader}.\")\n\n    if args.irregular > 0:\n        raise ValueError(f\"Task type {task_type} is not supported when irregular>0.\")\n    print('done')\n\n    config = 
dict(\n        batch_size=args.batch_size,\n        lr=args.lr,\n        latent_dim=args.latent_dim,\n        hidden_dim=args.hidden_dim,\n        z_kld_weight=args.z_kld_weight,\n        x_kld_weight=args.x_kld_weight,\n        max_train_length=args.max_train_length\n    )\n\n    if args.save_every is not None:\n        unit = 'epoch' if args.epochs is not None else 'iter'\n        config[f'after_{unit}_callback'] = save_checkpoint_callback(args.save_every, unit)\n\n    run_dir = 'training/' + args.dataset + '__' + name_with_datetime(args.run_name)\n    os.makedirs(run_dir, exist_ok=True)\n\n    t = time.time()\n\n    model = DONUT(\n        input_dims=train_data.shape[-1],\n        device=device,\n        **config\n    )\n    loss_log = model.train(\n        train_data,\n        n_epochs=args.epochs,\n        n_iters=args.iters,\n        verbose=True\n    )\n    model.save(f'{run_dir}/model.pkl')\n\n    t = time.time() - t\n    print(f\"\\nTraining time: {datetime.timedelta(seconds=t)}\")\n    print(\"Training time(seconds): \", t)\n\n    if args.eval:\n        if task_type == 'anomaly_detection':\n            out, eval_res = model.evaluate(model, all_train_data, all_train_labels, all_train_timestamps, all_test_data,\n                                           all_test_labels, all_test_timestamps, delay, is_multi=args.is_multi, ucr_index=args.index)\n        else:\n            assert False\n        pkl_save(f'{run_dir}/out.pkl', out)\n        pkl_save(f'{run_dir}/eval_res.pkl', eval_res)\n        print('Evaluation result:', eval_res)\n\n        eval_res['dataset'] = args.dataset + str(args.index)\n        import pandas as pd\n\n        # 转换字典为 DataFrame\n        df = pd.DataFrame([eval_res])\n        # 指定保存路径\n        save_path = args.save_dir + args.save_csv_name\n\n        # 转换字典为 DataFrame\n        df_new = pd.DataFrame([eval_res])\n\n        # 检查文件是否存在\n        if os.path.exists(save_path):\n            # 文件存在，读取现有数据\n            df_existing = 
pd.read_csv(save_path, index_col=0)\n            # 将新数据附加到现有数据框中\n            df_combined = pd.concat([df_existing, df_new], ignore_index=True)\n        else:\n            # 文件不存在，创建新的数据框\n            df_combined = df_new\n\n        # 保存 DataFrame 为 CSV 文件\n        df_combined.to_csv(save_path, index=True, index_label=\"id\")\n\n    print(\"Finished.\")\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/train_dspot.py",
    "content": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom spot import dSPOT\nimport numpy as np\nimport time\nimport datetime\nimport datautils\nfrom sklearn.metrics import f1_score, precision_score, recall_score\nimport argparse\nfrom sklearn.metrics import f1_score, precision_score, recall_score\nfrom other_anomaly_baselines.metrics.affiliation.metrics import pr_from_events\nfrom other_anomaly_baselines.metrics.vus.metrics import get_range_vus_roc\nfrom other_anomaly_baselines.metrics.affiliation.generics import convert_vector_to_events\nfrom tadpak import evaluate\n\n\ndef adjustment(gt, pred):\n    anomaly_state = False\n    for i in range(len(gt)):\n        if gt[i] == 1 and pred[i] == 1 and not anomaly_state:\n            anomaly_state = True\n            for j in range(i, 0, -1):\n                if gt[j] == 0:\n                    break\n                else:\n                    if pred[j] == 0:\n                        pred[j] = 1\n            for j in range(i, len(gt)):\n                if gt[j] == 0:\n                    break\n                else:\n                    if pred[j] == 0:\n                        pred[j] = 1\n        elif gt[i] == 0:\n            anomaly_state = False\n        if anomaly_state:\n            pred[i] = 1\n    return gt, pred\n\n# consider delay threshold and missing segments\ndef get_range_proba(predict, label, delay=7):\n    splits = np.where(label[1:] != label[:-1])[0] + 1\n    is_anomaly = label[0] == 1\n    new_predict = np.array(predict)\n    pos = 0\n\n    for sp in splits:\n        if is_anomaly:\n            if 1 in predict[pos:min(pos + delay + 1, sp)]:\n                new_predict[pos: sp] = 1\n            else:\n                new_predict[pos: sp] = 0\n        is_anomaly = not is_anomaly\n        pos = sp\n    sp = len(label)\n\n    if is_anomaly:  # 
anomaly in the end\n        if 1 in predict[pos: min(pos + delay + 1, sp)]:\n            new_predict[pos: sp] = 1\n        else:\n            new_predict[pos: sp] = 0\n\n    return new_predict\n\n\n\nparser = argparse.ArgumentParser()\n# parser.add_argument('dataset', help='The dataset name')\n# parser.add_argument('run_name', help='The folder name used to save model, output and evaluation metrics. This can be set to any word')\n# parser.add_argument('--dataset', default='kpi', help='The dataset name, yahoo, kpi')\nparser.add_argument('--dataset', default='kpi',\n                help='The dataset name, yahoo, kpi')  ##  SMD, MSL, SMAP, PSM, SWAT, NIPS_TS_Swan, UCR, NIPS_TS_Water\nparser.add_argument('--is_multi', default=False, help='The dataset name, yahoo, kpi')\nparser.add_argument('--datapath', default='./datasets/', help='')\nparser.add_argument('--gpu', type=int, default=0, help='The gpu no. used for training and inference (defaults to 0)')\nparser.add_argument('--index', type=int, default=143, help='')\nparser.add_argument('--batch_size', type=int, default=8, help='The batch size (defaults to 8)')\nparser.add_argument('--save_dir', type=str, default='/dev_data/lz/tsm_ptms_anomaly_detection/result/')\nparser.add_argument('--save_csv_name', type=str, default='dspot_0719.csv')\n\nargs = parser.parse_args()\n\nprint(\"Dataset:\", args.dataset)\nprint(\"Arguments:\", str(args))\n\nif args.is_multi:\n    from datasets.data_loader import get_loader_segment\n\n    data_path = args.datapath + args.dataset + '/'\n    print(\"data_path = \", data_path)\n    _, train_data_loader = get_loader_segment(args.index, data_path, args.batch_size, win_size=100, step=100,\n                                              mode='train',\n                                              dataset=args.dataset)\n\n    all_train_data = train_data_loader.train\n    all_train_labels = None\n    all_train_timestamps = None\n    all_test_data = train_data_loader.test\n    all_test_labels = 
train_data_loader.test_labels\n    all_test_timestamps = None\n    delay = 5\n\n    all_train_data = np.squeeze(all_train_data)\n    all_test_data = np.squeeze(all_test_data)\n\n    print(\"all_train_data test_data, test_labels.shape = \", all_train_data.shape, all_test_data.shape,\n          all_test_labels.shape)\n    # train_data = np.expand_dims(all_train_data, axis=0)\n    # print(\"train_data.shape = \", train_data.shape)\n    print(\"Read Success!!!\")\n\nelse:\n\n    # dataset = 'kpi' # yahoo, kpi\n    print('Loading data... ', end='')\n    all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay = datautils.load_anomaly(args.dataset)\n\n\n\nlabels = []\npred = []\nscores = []\n\nif args.is_multi:\n    train_data = all_train_data  # initial batch\n    train_labels = all_train_labels\n\n    test_data = all_test_data # stream\n    test_labels = all_test_labels\n    test_timestamps = all_test_timestamps\n\n    q = 1e-4  # risk parameter # yahoo: 1e-3\n    d = 50  # depth\n    s = dSPOT(q, d)  # DSPOT object\n    s.fit(train_data, test_data)  # data import\n    s.initialize()  # initialization step\n    results = s.run()  # run\n\n    test_thresholds = results['thresholds']\n    idx_anoamly = results['alarms']\n\n    test_pred = np.zeros(len(test_thresholds))\n    test_pred[idx_anoamly] = 1\n\n    test_pred = get_range_proba(test_pred, test_labels, delay)\n\n    labels.append(test_labels)\n    pred.append(test_pred)\n    scores.append(results['scores'])\nelse:\n    for k in all_test_data:\n        train_data = all_train_data[k] # initial batch\n        train_labels = all_train_labels[k]\n        train_timestamps = all_train_timestamps[k]\n\n        test_data = all_test_data[k] # stream\n        test_labels = all_test_labels[k]\n        test_timestamps = all_test_timestamps[k]\n\n        q = 1e-4            # risk parameter # yahoo: 1e-3\n        d = 50              # depth\n        s = dSPOT(q, d)  \t\t# 
DSPOT object\n        s.fit(train_data, test_data) \t# data import\n        s.initialize() \t\t# initialization step\n        results = s.run() \t# run\n\n        test_thresholds = results['thresholds']\n        idx_anoamly = results['alarms']\n\n        test_pred = np.zeros(len(test_thresholds))\n        test_pred[idx_anoamly] = 1\n\n        test_pred = get_range_proba(test_pred, test_labels, delay)\n\n        labels.append(test_labels)\n        pred.append(test_pred)\n        scores.append(results['scores'])\n\nlabels = np.concatenate(labels)\npred = np.concatenate(pred)\n\nscores = np.concatenate(scores)\n\nif args.is_multi:\n    # labels = np.asarray(labels_log, np.int64)[0]\n    # pred = np.asarray(res_log, np.int64)[0]\n    # print(\"labels.shape = \", labels.shape, labels[:5])\n    # print(\"pred.shape = \", pred.shape, pred[:5])\n\n    labels, pred = adjustment(labels, pred)\n\n    events_pred = convert_vector_to_events(pred)\n    events_gt = convert_vector_to_events(labels)\n\n    Trange = (0, len(labels))\n    affiliation = pr_from_events(events_pred, events_gt, Trange)\n    vus_results = get_range_vus_roc(labels, pred, 100)  # default slidingWindow = 100\n\n    pred_scores = scores\n    results_f1_pa_k_10 = evaluate.evaluate(pred_scores, labels, k=10)\n    # results_f1_pa_k_30 = evaluate.evaluate(pred, labels, k=30)\n    results_f1_pa_k_50 = evaluate.evaluate(pred_scores, labels, k=50)\n    results_f1_pa_k_90 = evaluate.evaluate(pred_scores, labels, k=90)\n\n    eval_res = {\n        'f1': f1_score(labels, pred),\n        'precision': precision_score(labels, pred),\n        'recall': recall_score(labels, pred),\n        \"Affiliation precision\": affiliation['precision'],\n        \"Affiliation recall\": affiliation['recall'],\n        \"R_AUC_ROC\": vus_results[\"R_AUC_ROC\"],\n        \"R_AUC_PR\": vus_results[\"R_AUC_PR\"],\n        \"VUS_ROC\": vus_results[\"VUS_ROC\"],\n        \"VUS_PR\": vus_results[\"VUS_PR\"],\n        'f1_pa_10': 
results_f1_pa_k_10['best_f1_w_pa'],\n        'f1_pa_50': results_f1_pa_k_50['best_f1_w_pa'],\n        'f1_pa_90': results_f1_pa_k_90['best_f1_w_pa'],\n    }\n    print(\"eval_res = \", eval_res)\nelse:\n\n    print('\\nf1:', f1_score(labels, pred))\n    print('precision:', precision_score(labels, pred))\n    print('recall:', recall_score(labels, pred))\n\n    events_pred = convert_vector_to_events(pred)\n    events_gt = convert_vector_to_events(labels)\n\n    Trange = (0, len(labels))\n    affiliation = pr_from_events(events_pred, events_gt, Trange)\n    vus_results = get_range_vus_roc(labels, pred, 100)  # default slidingWindow = 100\n\n    eval_res = {\n        'f1': f1_score(labels, pred),\n        'precision': precision_score(labels, pred),\n        'recall': recall_score(labels, pred),\n        \"Affiliation precision\": affiliation['precision'],\n        \"Affiliation recall\": affiliation['recall'],\n        \"R_AUC_ROC\": vus_results[\"R_AUC_ROC\"],\n        \"R_AUC_PR\": vus_results[\"R_AUC_PR\"],\n        \"VUS_ROC\": vus_results[\"VUS_ROC\"],\n        \"VUS_PR\": vus_results[\"VUS_PR\"]\n    }\n\n    # results_f1_pa_k_10 = evaluate.evaluate(scores, labels, k=10)\n    # # results_f1_pa_k_30 = evaluate.evaluate(pred, labels, k=30)\n    # results_f1_pa_k_50 = evaluate.evaluate(scores, labels, k=50)\n    # results_f1_pa_k_90 = evaluate.evaluate(scores, labels, k=90)\n    #\n    # eval_res['f1_pa_10'] = results_f1_pa_k_10['best_f1_w_pa']\n    # eval_res['f1_pa_50'] = results_f1_pa_k_50['best_f1_w_pa']\n    # eval_res['f1_pa_90'] = results_f1_pa_k_90['best_f1_w_pa']\n\n\neval_res['dataset'] = args.dataset + str(args.index)\nimport pandas as pd\nimport os\n\n# 转换字典为 DataFrame\ndf = pd.DataFrame([eval_res])\n# 指定保存路径\nsave_path = args.save_dir + args.save_csv_name\n\n# 转换字典为 DataFrame\ndf_new = pd.DataFrame([eval_res])\n\n# 检查文件是否存在\nif os.path.exists(save_path):\n    # 文件存在，读取现有数据\n    df_existing = pd.read_csv(save_path, index_col=0)\n    # 将新数据附加到现有数据框中\n    
df_combined = pd.concat([df_existing, df_new], ignore_index=True)\nelse:\n    # 文件不存在，创建新的数据框\n    df_combined = df_new\n\n# 保存 DataFrame 为 CSV 文件\ndf_combined.to_csv(save_path, index=True, index_label=\"id\")\n\nprint(\"Finished.\")"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/train_dspot_multi.py",
    "content": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom spot import dSPOT\nimport numpy as np\nimport time\nimport datetime\nimport datautils\nfrom sklearn.metrics import f1_score, precision_score, recall_score\nimport argparse\nfrom sklearn.metrics import f1_score, precision_score, recall_score\nfrom other_anomaly_baselines.metrics.affiliation.metrics import pr_from_events\nfrom other_anomaly_baselines.metrics.vus.metrics import get_range_vus_roc\nfrom other_anomaly_baselines.metrics.affiliation.generics import convert_vector_to_events\nfrom tadpak import evaluate\nimport torch.multiprocessing as mp\nimport numpy as np\n\n# 更改共享策略\nmp.set_sharing_strategy('file_system')\n\n\ndef adjustment(gt, pred):\n    anomaly_state = False\n    for i in range(len(gt)):\n        if gt[i] == 1 and pred[i] == 1 and not anomaly_state:\n            anomaly_state = True\n            for j in range(i, 0, -1):\n                if gt[j] == 0:\n                    break\n                else:\n                    if pred[j] == 0:\n                        pred[j] = 1\n            for j in range(i, len(gt)):\n                if gt[j] == 0:\n                    break\n                else:\n                    if pred[j] == 0:\n                        pred[j] = 1\n        elif gt[i] == 0:\n            anomaly_state = False\n        if anomaly_state:\n            pred[i] = 1\n    return gt, pred\n\n# consider delay threshold and missing segments\ndef get_range_proba(predict, label, delay=7):\n    splits = np.where(label[1:] != label[:-1])[0] + 1\n    is_anomaly = label[0] == 1\n    new_predict = np.array(predict)\n    pos = 0\n\n    for sp in splits:\n        if is_anomaly:\n            if 1 in predict[pos:min(pos + delay + 1, sp)]:\n                new_predict[pos: sp] = 1\n            else:\n                new_predict[pos: 
sp] = 0\n        is_anomaly = not is_anomaly\n        pos = sp\n    sp = len(label)\n\n    if is_anomaly:  # anomaly in the end\n        if 1 in predict[pos: min(pos + delay + 1, sp)]:\n            new_predict[pos: sp] = 1\n        else:\n            new_predict[pos: sp] = 0\n\n    return new_predict\n\n\n\nparser = argparse.ArgumentParser()\n# parser.add_argument('dataset', help='The dataset name')\n# parser.add_argument('run_name', help='The folder name used to save model, output and evaluation metrics. This can be set to any word')\n# parser.add_argument('--dataset', default='kpi', help='The dataset name, yahoo, kpi')\nparser.add_argument('--dataset', default='UCR',\n                help='The dataset name, yahoo, kpi')  ##  SMD, MSL, SMAP, PSM, SWAT, NIPS_TS_Swan, UCR, NIPS_TS_Water\nparser.add_argument('--is_multi', default=True, help='The dataset name, yahoo, kpi')\nparser.add_argument('--datapath', default='./datasets/', help='')\nparser.add_argument('--gpu', type=int, default=0, help='The gpu no. 
used for training and inference (defaults to 0)')\nparser.add_argument('--index', type=int, default=203, help='')  ##  [79, 108, 187, 203]\nparser.add_argument('--batch_size', type=int, default=8, help='The batch size (defaults to 8)')\nparser.add_argument('--save_dir', type=str, default='/dev_data/lz/tsm_ptms_anomaly_detection/result/')\nparser.add_argument('--save_csv_name', type=str, default='dspot_ucr_0727.csv')\n\nargs = parser.parse_args()\n\nprint(\"Dataset:\", args.dataset)\nprint(\"Arguments:\", str(args))\n\nif args.is_multi:\n    from datasets.data_loader import get_loader_segment\n\n    data_path = args.datapath + args.dataset + '/'\n    print(\"data_path = \", data_path)\n    _, train_data_loader = get_loader_segment(args.index, data_path, args.batch_size, win_size=100, step=100,\n                                              mode='train',\n                                              dataset=args.dataset)\n\n    all_train_data = train_data_loader.train\n    all_train_labels = None\n    all_train_timestamps = None\n    all_test_data = train_data_loader.test\n    all_test_labels = train_data_loader.test_labels\n    all_test_timestamps = None\n    delay = 5\n\n    all_train_data = np.squeeze(all_train_data)\n    all_test_data = np.squeeze(all_test_data)\n\n    print(\"all_train_data test_data, test_labels.shape = \", all_train_data.shape, all_test_data.shape,\n          all_test_labels.shape)\n    # train_data = np.expand_dims(all_train_data, axis=0)\n    # print(\"train_data.shape = \", train_data.shape)\n    print(\"Read Success!!!\")\n\nelse:\n\n    # dataset = 'kpi' # yahoo, kpi\n    print('Loading data... 
', end='')\n    all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay = datautils.load_anomaly(args.dataset)\n\n\n\nlabels = []\npred = []\nscores = []\n\nif args.is_multi:\n    train_data = all_train_data  # initial batch\n    train_labels = all_train_labels\n\n    test_data = all_test_data # stream\n    test_labels = all_test_labels\n    test_timestamps = all_test_timestamps\n\n    q = 1e-4  # risk parameter # yahoo: 1e-3\n    d = 50  # depth\n    s = dSPOT(q, d)  # DSPOT object\n    s.fit(train_data, test_data)  # data import\n    s.initialize()  # initialization step\n    results = s.run()  # run\n\n    test_thresholds = results['thresholds']\n    idx_anoamly = results['alarms']\n\n    test_pred = np.zeros(len(test_thresholds))\n    test_pred[idx_anoamly] = 1\n\n    test_pred = get_range_proba(test_pred, test_labels, delay)\n\n    labels.append(test_labels)\n    pred.append(test_pred)\n    scores.append(results['scores'])\nelse:\n    for k in all_test_data:\n        train_data = all_train_data[k] # initial batch\n        train_labels = all_train_labels[k]\n        train_timestamps = all_train_timestamps[k]\n\n        test_data = all_test_data[k] # stream\n        test_labels = all_test_labels[k]\n        test_timestamps = all_test_timestamps[k]\n\n        q = 1e-4            # risk parameter # yahoo: 1e-3\n        d = 50              # depth\n        s = dSPOT(q, d)  \t\t# DSPOT object\n        s.fit(train_data, test_data) \t# data import\n        s.initialize() \t\t# initialization step\n        results = s.run() \t# run\n\n        test_thresholds = results['thresholds']\n        idx_anoamly = results['alarms']\n\n        test_pred = np.zeros(len(test_thresholds))\n        test_pred[idx_anoamly] = 1\n\n        test_pred = get_range_proba(test_pred, test_labels, delay)\n\n        labels.append(test_labels)\n        pred.append(test_pred)\n        scores.append(results['scores'])\n\nlabels = 
np.concatenate(labels)\npred = np.concatenate(pred)\n\nscores = np.concatenate(scores)\n\nif args.is_multi:\n    # labels = np.asarray(labels_log, np.int64)[0]\n    # pred = np.asarray(res_log, np.int64)[0]\n    # print(\"labels.shape = \", labels.shape, labels[:5])\n    # print(\"pred.shape = \", pred.shape, pred[:5])\n    if args.index == 79 or args.index == 108 or args.index == 187 or args.index == 203:\n        labels, pred = adjustment(labels, pred)\n        eval_res = {\n            'f1': f1_score(labels, pred),\n            'precision': precision_score(labels, pred),\n            'recall': recall_score(labels, pred),\n            \"Affiliation precision\": None,\n            \"Affiliation recall\": None,\n            \"R_AUC_ROC\": None,\n            \"R_AUC_PR\": None,\n            \"VUS_ROC\": None,\n            \"VUS_PR\": None,\n            'f1_pa_10': None,\n            'f1_pa_50': None,\n            'f1_pa_90': None,\n        }\n    else:\n        events_pred = convert_vector_to_events(pred)\n        events_gt = convert_vector_to_events(labels)\n\n        Trange = (0, len(labels))\n        affiliation = pr_from_events(events_pred, events_gt, Trange)\n        vus_results = get_range_vus_roc(labels, pred, 100)  # default slidingWindow = 100\n\n        pred_scores = scores\n        results_f1_pa_k_10 = evaluate.evaluate(pred_scores, labels, k=10)\n        # results_f1_pa_k_30 = evaluate.evaluate(pred, labels, k=30)\n        results_f1_pa_k_50 = evaluate.evaluate(pred_scores, labels, k=50)\n        results_f1_pa_k_90 = evaluate.evaluate(pred_scores, labels, k=90)\n\n        labels, pred = adjustment(labels, pred)\n\n        eval_res = {\n            'f1': f1_score(labels, pred),\n            'precision': precision_score(labels, pred),\n            'recall': recall_score(labels, pred),\n            \"Affiliation precision\": affiliation['precision'],\n            \"Affiliation recall\": affiliation['recall'],\n            \"R_AUC_ROC\": 
vus_results[\"R_AUC_ROC\"],\n            \"R_AUC_PR\": vus_results[\"R_AUC_PR\"],\n            \"VUS_ROC\": vus_results[\"VUS_ROC\"],\n            \"VUS_PR\": vus_results[\"VUS_PR\"],\n            'f1_pa_10': results_f1_pa_k_10['best_f1_w_pa'],\n            'f1_pa_50': results_f1_pa_k_50['best_f1_w_pa'],\n            'f1_pa_90': results_f1_pa_k_90['best_f1_w_pa'],\n        }\n    print(\"eval_res = \", eval_res)\nelse:\n\n    print('\\nf1:', f1_score(labels, pred))\n    print('precision:', precision_score(labels, pred))\n    print('recall:', recall_score(labels, pred))\n\n    events_pred = convert_vector_to_events(pred)\n    events_gt = convert_vector_to_events(labels)\n\n    Trange = (0, len(labels))\n    affiliation = pr_from_events(events_pred, events_gt, Trange)\n    vus_results = get_range_vus_roc(labels, pred, 100)  # default slidingWindow = 100\n\n    eval_res = {\n        'f1': f1_score(labels, pred),\n        'precision': precision_score(labels, pred),\n        'recall': recall_score(labels, pred),\n        \"Affiliation precision\": affiliation['precision'],\n        \"Affiliation recall\": affiliation['recall'],\n        \"R_AUC_ROC\": vus_results[\"R_AUC_ROC\"],\n        \"R_AUC_PR\": vus_results[\"R_AUC_PR\"],\n        \"VUS_ROC\": vus_results[\"VUS_ROC\"],\n        \"VUS_PR\": vus_results[\"VUS_PR\"]\n    }\n\n    results_f1_pa_k_10 = evaluate.evaluate(scores, labels, k=10)\n    # results_f1_pa_k_30 = evaluate.evaluate(pred, labels, k=30)\n    results_f1_pa_k_50 = evaluate.evaluate(scores, labels, k=50)\n    results_f1_pa_k_90 = evaluate.evaluate(scores, labels, k=90)\n\n    eval_res['f1_pa_10'] = results_f1_pa_k_10['best_f1_w_pa']\n    eval_res['f1_pa_50'] = results_f1_pa_k_50['best_f1_w_pa']\n    eval_res['f1_pa_90'] = results_f1_pa_k_90['best_f1_w_pa']\n\n\neval_res['dataset'] = args.dataset + str(args.index)\nimport pandas as pd\nimport os\n\n# 转换字典为 DataFrame\ndf = pd.DataFrame([eval_res])\n# 指定保存路径\nsave_path = args.save_dir + 
args.save_csv_name\n\n# 转换字典为 DataFrame\ndf_new = pd.DataFrame([eval_res])\n\n# 检查文件是否存在\nif os.path.exists(save_path):\n    # 文件存在，读取现有数据\n    df_existing = pd.read_csv(save_path, index_col=0)\n    # 将新数据附加到现有数据框中\n    df_combined = pd.concat([df_existing, df_new], ignore_index=True)\nelse:\n    # 文件不存在，创建新的数据框\n    df_combined = df_new\n\n# 保存 DataFrame 为 CSV 文件\ndf_combined.to_csv(save_path, index=True, index_label=\"id\")\n\nprint(\"Finished.\")"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/train_gpt4ts.py",
    "content": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\nimport argparse\nimport os\nimport torch\n\nfrom other_anomaly_baselines.exp_anomaly_detection import Exp_Anomaly_Detection\nfrom other_anomaly_baselines.datasets.data_loader import get_loader_segment\n\nimport random\nimport numpy as np\n\nif __name__ == '__main__':\n    fix_seed = 42\n    random.seed(fix_seed)\n    torch.manual_seed(fix_seed)\n    np.random.seed(fix_seed)\n\n    parser = argparse.ArgumentParser(description='GPT4TS')\n\n    # basic config\n    parser.add_argument('--task_name', type=str, default='anomaly_detection',\n                        help='task name, options:[long_term_forecast, short_term_forecast, imputation, classification, anomaly_detection]')\n    parser.add_argument('--is_training', type=int, default=1, help='status')\n    parser.add_argument('--model_id', type=str, default='test', help='model id')\n    parser.add_argument('--model', type=str, default='GPT4TS',\n                        help='model name, options: [Autoformer, Transformer, TimesNet]')\n\n    # data loader\n    parser.add_argument('--data', type=str, default='UCR', help='dataset type')   ##  SMD, MSL, SMAP, PSM, SWAT, NIPS_TS_Swan, UCR, NIPS_TS_Water\n    parser.add_argument('--root_path', type=str, default='./data/ETT/', help='root path of the data file')\n    # parser.add_argument('--data_path', type=str, default='ETTh1.csv', help='data file')\n    parser.add_argument('--features', type=str, default='M',\n                        help='forecasting task, options:[M, S, MS]; M:multivariate predict multivariate, S:univariate predict univariate, MS:multivariate predict univariate')\n    parser.add_argument('--target', type=str, default='OT', help='target feature in S or MS task')\n    parser.add_argument('--freq', type=str, default='h',\n                        help='freq for time features encoding, 
options:[s:secondly, t:minutely, h:hourly, d:daily, b:business days, w:weekly, m:monthly], you can also use more detailed freq like 15min or 3h')\n    parser.add_argument('--checkpoints', type=str, default='./checkpoints/', help='location of model checkpoints')\n\n    # forecasting task\n    parser.add_argument('--seq_len', type=int, default=100, help='input sequence length')\n    parser.add_argument('--label_len', type=int, default=48, help='start token length')\n    parser.add_argument('--pred_len', type=int, default=0, help='prediction sequence length')\n    parser.add_argument('--seasonal_patterns', type=str, default='Monthly', help='subset for M4')\n    parser.add_argument('--inverse', action='store_true', help='inverse output data', default=False)\n\n    # inputation task\n    parser.add_argument('--mask_rate', type=float, default=0.25, help='mask ratio')\n\n    # anomaly detection task\n    parser.add_argument('--anomaly_ratio', type=float, default=0.5, help='prior anomaly ratio (%)')\n\n    # model define\n    parser.add_argument('--top_k', type=int, default=3, help='for TimesBlock')\n    parser.add_argument('--num_kernels', type=int, default=6, help='for Inception')\n    parser.add_argument('--enc_in', type=int, default=1, help='encoder input size') ## 55 for MSL, 38 for SMD, SMAP for 25, PSM for 25, SWAT for 51, NIPS_TS_Swan for 38,\n    parser.add_argument('--dec_in', type=int, default=7, help='decoder input size')  ## NIPS_TS_Water for 38, UCR for 1\n    parser.add_argument('--c_out', type=int, default=1, help='output size')\n    parser.add_argument('--d_model', type=int, default=8, help='dimension of model')\n    parser.add_argument('--n_heads', type=int, default=8, help='num of heads')\n    parser.add_argument('--e_layers', type=int, default=1, help='num of encoder layers')\n    parser.add_argument('--d_layers', type=int, default=1, help='num of decoder layers')\n    parser.add_argument('--d_ff', type=int, default=16, help='dimension of fcn')\n    
parser.add_argument('--moving_avg', type=int, default=25, help='window size of moving average')\n    parser.add_argument('--factor', type=int, default=1, help='attn factor')\n    parser.add_argument('--distil', action='store_false',\n                        help='whether to use distilling in encoder, using this argument means not using distilling',\n                        default=True)\n    parser.add_argument('--dropout', type=float, default=0.1, help='dropout')\n    parser.add_argument('--embed', type=str, default='timeF',\n                        help='time features encoding, options:[timeF, fixed, learned]')\n    parser.add_argument('--activation', type=str, default='gelu', help='activation')\n    parser.add_argument('--output_attention', action='store_true', help='whether to output attention in ecoder')\n\n    # optimization\n    parser.add_argument('--num_workers', type=int, default=10, help='data loader num workers')\n    parser.add_argument('--itr', type=int, default=1, help='experiments times')\n    parser.add_argument('--train_epochs', type=int, default=3, help='train epochs')\n    parser.add_argument('--batch_size', type=int, default=32, help='batch size of train input data')\n    parser.add_argument('--patience', type=int, default=3, help='early stopping patience')\n    parser.add_argument('--learning_rate', type=float, default=0.0001, help='optimizer learning rate')\n    parser.add_argument('--des', type=str, default='test', help='exp description')\n    parser.add_argument('--loss', type=str, default='MSE', help='loss function')\n    parser.add_argument('--lradj', type=str, default='type1', help='adjust learning rate')\n    parser.add_argument('--use_amp', action='store_true', help='use automatic mixed precision training', default=False)\n\n    # GPU\n    parser.add_argument('--use_gpu', type=bool, default=True, help='use gpu')\n    parser.add_argument('--gpu', type=int, default=0, help='gpu')\n    parser.add_argument('--use_multi_gpu', 
action='store_true', help='use multiple gpus', default=False)\n    parser.add_argument('--devices', type=str, default='0,1,2,3', help='device ids of multile gpus')\n\n    # de-stationary projector params\n    parser.add_argument('--p_hidden_dims', type=int, nargs='+', default=[128, 128],\n                        help='hidden layer dimensions of projector (List)')\n    parser.add_argument('--p_hidden_layers', type=int, default=2, help='number of hidden layers in projector')\n\n    # patching\n    parser.add_argument('--patch_size', type=int, default=1)\n    parser.add_argument('--stride', type=int, default=1)\n    parser.add_argument('--gpt_layers', type=int, default=6)\n    parser.add_argument('--ln', type=int, default=0)\n    parser.add_argument('--mlp', type=int, default=0)\n    parser.add_argument('--weight', type=float, default=0)\n    parser.add_argument('--percent', type=int, default=5)\n\n    # Default\n    parser.add_argument('--index', type=int, default=79)\n    parser.add_argument('--data_path', type=str, default='datasets/')\n    parser.add_argument('--win_size', type=int, default=100)\n\n    parser.add_argument('--save_dir', type=str, default='/dev_data/lz/tsm_ptms_anomaly_detection/result/')\n    parser.add_argument('--save_csv_name', type=str, default='gpt4ts_ucr_0727.csv')\n\n\n    args = parser.parse_args()\n    args.use_gpu = True if torch.cuda.is_available() and args.use_gpu else False\n\n    # 检查路径是否存在，如果不存在则赋值为新的路径\n    if not os.path.exists(args.save_dir):\n        args.save_dir = '/SSD/lz/tsm_ptms_anomaly_detection/result/'\n\n    print(\"save_dir = \", args.save_dir)  # 输出检查\n\n    if args.use_gpu and args.use_multi_gpu:\n        args.devices = args.devices.replace(' ', '')\n        device_ids = args.devices.split(',')\n        args.device_ids = [int(id_) for id_ in device_ids]\n        args.gpu = args.device_ids[0]\n\n    print('Args in experiment:')\n    print(args)\n\n    Exp = Exp_Anomaly_Detection\n\n    train_loader, train_set = 
get_loader_segment(args.index, args.data_path + args.data, batch_size=args.batch_size,\n                                              win_size=args.win_size, mode='train', dataset=args.data)\n    val_loader, val_set = get_loader_segment(args.index, args.data_path + args.data, batch_size=args.batch_size,\n                                             win_size=args.win_size, mode='val', dataset=args.data)\n    test_loader, test_set = get_loader_segment(args.index, args.data_path + args.data, batch_size=args.batch_size,\n                                             win_size=args.win_size, mode='test', dataset=args.data)\n\n    train_set = train_set.train\n    val_set = val_set.val\n    test_set = test_set.test\n\n    print(\"train_set.shape = \", train_set.shape, \", test_set.shape = \", test_set.shape, test_set.shape[-1])\n    args.enc_in = train_set.shape[-1]\n    args.c_out = train_set.shape[-1]\n\n    if args.is_training:\n        for ii in range(args.itr):\n            # setting record of experiments\n            setting = '{}_{}_{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_fc{}_eb{}_dt{}_{}_{}'.format(\n                args.task_name,\n                args.model_id,\n                args.model,\n                args.data,\n                args.features,\n                args.seq_len,\n                args.label_len,\n                args.pred_len,\n                args.d_model,\n                args.n_heads,\n                args.e_layers,\n                args.d_layers,\n                args.d_ff,\n                args.factor,\n                args.embed,\n                args.distil,\n                args.des, ii)\n\n            exp = Exp(args, train_set, train_loader, val_set, val_loader, test_set, test_loader)  # set experiments\n            print('>>>>>>>start training : {}>>>>>>>>>>>>>>>>>>>>>>>>>>'.format(setting))\n            exp.train(setting)\n\n            print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))\n            
eval_res = exp.test(setting, dataset=args.data, ucr_index=args.index)\n            torch.cuda.empty_cache()\n\n            print(\"result_dict = \", eval_res)\n\n            eval_res['dataset'] = args.data + str(args.index)\n            import pandas as pd\n\n            # 转换字典为 DataFrame\n            df = pd.DataFrame([eval_res])\n            # 指定保存路径\n            save_path = args.save_dir + args.save_csv_name\n\n            # 转换字典为 DataFrame\n            df_new = pd.DataFrame([eval_res])\n\n            # 检查文件是否存在\n            if os.path.exists(save_path):\n                # 文件存在，读取现有数据\n                df_existing = pd.read_csv(save_path, index_col=0)\n                # 将新数据附加到现有数据框中\n                df_combined = pd.concat([df_existing, df_new], ignore_index=True)\n            else:\n                # 文件不存在，创建新的数据框\n                df_combined = df_new\n\n            # 保存 DataFrame 为 CSV 文件\n            df_combined.to_csv(save_path, index=True, index_label=\"id\")\n    else:\n        ii = 0\n        setting = '{}_{}_{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_fc{}_eb{}_dt{}_{}_{}'.format(\n            args.task_name,\n            args.model_id,\n            args.model,\n            args.data,\n            args.features,\n            args.seq_len,\n            args.label_len,\n            args.pred_len,\n            args.d_model,\n            args.n_heads,\n            args.e_layers,\n            args.d_layers,\n            args.d_ff,\n            args.factor,\n            args.embed,\n            args.distil,\n            args.des, ii)\n\n        exp = Exp(args)  # set experiments\n        print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))\n        exp.test(setting, test=1)\n        torch.cuda.empty_cache()\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/train_gpt4ts_uni.py",
    "content": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\nimport argparse\nimport os\nimport torch\nfrom torch.utils.data import TensorDataset, DataLoader\nfrom other_anomaly_baselines.exp_anomaly_detection import Exp_Anomaly_Detection\nfrom other_anomaly_baselines.datasets.data_loader import get_loader_segment\nimport datautils\nimport random\nimport numpy as np\n\n\n\nclass UniLoader(object):\n    def __init__(self, data_set, win_size, step, mode=\"train\"):\n        self.mode = mode\n        self.step = step\n        self.win_size = win_size\n\n        self.train = data_set\n\n\n    def __len__(self):\n        \"\"\"\n        Number of images in the object dataset.\n        \"\"\"\n\n        return (self.train.shape[0] - self.win_size) // self.step + 1\n\n\n    def __getitem__(self, index):\n        index = index * self.step\n\n        return np.float32(self.train[index:index + self.win_size])\n\n\nif __name__ == '__main__':\n    fix_seed = 42\n    random.seed(fix_seed)\n    torch.manual_seed(fix_seed)\n    np.random.seed(fix_seed)\n\n    parser = argparse.ArgumentParser(description='GPT4TS')\n\n    # basic config\n    parser.add_argument('--task_name', type=str, default='anomaly_detection',\n                        help='task name, options:[long_term_forecast, short_term_forecast, imputation, classification, anomaly_detection]')\n    parser.add_argument('--is_training', type=int, default=1, help='status')\n    parser.add_argument('--model_id', type=str, default='test', help='model id')\n    parser.add_argument('--model', type=str, default='GPT4TS',\n                        help='model name, options: [Autoformer, Transformer, TimesNet]')\n\n    # data loader\n    # parser.add_argument('--data', type=str, default='UCR', help='dataset type')   ##  SMD, MSL, SMAP, PSM, SWAT, NIPS_TS_Swan, UCR, NIPS_TS_Water\n    parser.add_argument('--data', type=str, 
default='kpi')  ##  kpi, yahoo\n    parser.add_argument('--root_path', type=str, default='./data/ETT/', help='root path of the data file')\n    # parser.add_argument('--data_path', type=str, default='ETTh1.csv', help='data file')\n    parser.add_argument('--features', type=str, default='M',\n                        help='forecasting task, options:[M, S, MS]; M:multivariate predict multivariate, S:univariate predict univariate, MS:multivariate predict univariate')\n    parser.add_argument('--target', type=str, default='OT', help='target feature in S or MS task')\n    parser.add_argument('--freq', type=str, default='h',\n                        help='freq for time features encoding, options:[s:secondly, t:minutely, h:hourly, d:daily, b:business days, w:weekly, m:monthly], you can also use more detailed freq like 15min or 3h')\n    parser.add_argument('--checkpoints', type=str, default='./checkpoints/', help='location of model checkpoints')\n\n    # forecasting task\n    parser.add_argument('--seq_len', type=int, default=100, help='input sequence length')\n    parser.add_argument('--label_len', type=int, default=48, help='start token length')\n    parser.add_argument('--pred_len', type=int, default=0, help='prediction sequence length')\n    parser.add_argument('--seasonal_patterns', type=str, default='Monthly', help='subset for M4')\n    parser.add_argument('--inverse', action='store_true', help='inverse output data', default=False)\n\n    # inputation task\n    parser.add_argument('--mask_rate', type=float, default=0.25, help='mask ratio')\n\n    # anomaly detection task\n    parser.add_argument('--anomaly_ratio', type=float, default=1, help='prior anomaly ratio (%)')\n\n    # model define\n    parser.add_argument('--top_k', type=int, default=3, help='for TimesBlock')\n    parser.add_argument('--num_kernels', type=int, default=6, help='for Inception')\n    parser.add_argument('--enc_in', type=int, default=1, help='encoder input size') ## 55 for MSL, 38 for SMD, SMAP 
for 25, PSM for 25, SWAT for 51, NIPS_TS_Swan for 38,\n    parser.add_argument('--dec_in', type=int, default=7, help='decoder input size')  ## NIPS_TS_Water for 38, UCR for 1\n    parser.add_argument('--c_out', type=int, default=1, help='output size')\n    parser.add_argument('--d_model', type=int, default=8, help='dimension of model')\n    parser.add_argument('--n_heads', type=int, default=8, help='num of heads')\n    parser.add_argument('--e_layers', type=int, default=1, help='num of encoder layers')\n    parser.add_argument('--d_layers', type=int, default=1, help='num of decoder layers')\n    parser.add_argument('--d_ff', type=int, default=16, help='dimension of fcn')\n    parser.add_argument('--moving_avg', type=int, default=25, help='window size of moving average')\n    parser.add_argument('--factor', type=int, default=1, help='attn factor')\n    parser.add_argument('--distil', action='store_false',\n                        help='whether to use distilling in encoder, using this argument means not using distilling',\n                        default=True)\n    parser.add_argument('--dropout', type=float, default=0.1, help='dropout')\n    parser.add_argument('--embed', type=str, default='timeF',\n                        help='time features encoding, options:[timeF, fixed, learned]')\n    parser.add_argument('--activation', type=str, default='gelu', help='activation')\n    parser.add_argument('--output_attention', action='store_true', help='whether to output attention in ecoder')\n\n    # optimization\n    parser.add_argument('--num_workers', type=int, default=10, help='data loader num workers')\n    parser.add_argument('--itr', type=int, default=1, help='experiments times')\n    parser.add_argument('--train_epochs', type=int, default=1, help='train epochs')\n    parser.add_argument('--batch_size', type=int, default=8, help='batch size of train input data')\n    parser.add_argument('--patience', type=int, default=3, help='early stopping patience')\n    
parser.add_argument('--learning_rate', type=float, default=0.0001, help='optimizer learning rate')\n    parser.add_argument('--des', type=str, default='test', help='exp description')\n    parser.add_argument('--loss', type=str, default='MSE', help='loss function')\n    parser.add_argument('--lradj', type=str, default='type1', help='adjust learning rate')\n    parser.add_argument('--use_amp', action='store_true', help='use automatic mixed precision training', default=False)\n\n    # GPU\n    parser.add_argument('--use_gpu', type=bool, default=True, help='use gpu')\n    parser.add_argument('--gpu', type=int, default=0, help='gpu')\n    parser.add_argument('--use_multi_gpu', action='store_true', help='use multiple gpus', default=False)\n    parser.add_argument('--devices', type=str, default='0,1,2,3', help='device ids of multile gpus')\n\n    # de-stationary projector params\n    parser.add_argument('--p_hidden_dims', type=int, nargs='+', default=[128, 128],\n                        help='hidden layer dimensions of projector (List)')\n    parser.add_argument('--p_hidden_layers', type=int, default=2, help='number of hidden layers in projector')\n\n    # patching\n    parser.add_argument('--patch_size', type=int, default=1)\n    parser.add_argument('--stride', type=int, default=1)\n    parser.add_argument('--gpt_layers', type=int, default=6)\n    parser.add_argument('--ln', type=int, default=0)\n    parser.add_argument('--mlp', type=int, default=0)\n    parser.add_argument('--weight', type=float, default=0)\n    parser.add_argument('--percent', type=int, default=5)\n\n    # Default\n    parser.add_argument('--index', type=int, default=137)\n    parser.add_argument('--data_path', type=str, default='datasets/')\n    parser.add_argument('--win_size', type=int, default=100)\n\n    parser.add_argument('--save_dir', type=str, default='/dev_data/lz/tsm_ptms_anomaly_detection/result/')\n    parser.add_argument('--save_csv_name', type=str, default='gpt4ts_uni_0717.csv')\n\n\n    
args = parser.parse_args()\n    args.use_gpu = True if torch.cuda.is_available() and args.use_gpu else False\n\n    # 检查路径是否存在，如果不存在则赋值为新的路径\n    if not os.path.exists(args.save_dir):\n        args.save_dir = '/SSD/lz/tsm_ptms_anomaly_detection/result/'\n\n    print(\"save_dir = \", args.save_dir)  # 输出检查\n\n    if args.use_gpu and args.use_multi_gpu:\n        args.devices = args.devices.replace(' ', '')\n        device_ids = args.devices.split(',')\n        args.device_ids = [int(id_) for id_ in device_ids]\n        args.gpu = args.device_ids[0]\n\n    print('Args in experiment:')\n    print(args)\n\n    Exp = Exp_Anomaly_Detection\n\n\n\n    # dataset = 'MSL'\n    # _train_loader, _train_set = get_loader_segment(args.index, args.data_path + dataset,\n    #                                                batch_size=args.batch_size,\n    #                                                win_size=args.win_size, mode='train', dataset=dataset)\n    #\n    # _train_set = _train_set.train\n    #\n    # print(\"_train_set.shape = \", _train_set.shape)\n\n    all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay = datautils.load_anomaly(\n        args.data)\n    train_data = datautils.gen_ano_train_data(all_train_data)\n\n    print(\"train_data.shape = \", train_data.shape)\n    _train_data = train_data[0]\n    print(\"000train_data.shape = \", train_data.shape, type(train_data))\n    _train_data = np.array(_train_data)\n    print(\"111_train_data.shape = \", _train_data.shape, type(_train_data))\n\n    train_dataset = UniLoader(_train_data, args.win_size, 1)\n\n    train_loader = DataLoader(dataset=train_dataset,\n                              batch_size=args.batch_size,\n                              shuffle=True,\n                              num_workers=8,\n                              drop_last=True)\n\n\n    val_loader = train_loader\n\n    args.input_c = train_data.shape[-1]\n    args.output_c = 
train_data.shape[-1]\n\n    # train_loader, train_set = get_loader_segment(args.index, args.data_path + args.data, batch_size=args.batch_size,\n    #                                           win_size=args.win_size, mode='train', dataset=args.data)\n    # val_loader, val_set = get_loader_segment(args.index, args.data_path + args.data, batch_size=args.batch_size,\n    #                                          win_size=args.win_size, mode='val', dataset=args.data)\n    # test_loader, test_set = get_loader_segment(args.index, args.data_path + args.data, batch_size=args.batch_size,\n    #                                          win_size=args.win_size, mode='test', dataset=args.data)\n\n    # train_set = train_set.train\n    # val_set = val_set.val\n    # test_set = test_set.test\n\n    print(\"train_set.shape = \", _train_data.shape)\n    args.enc_in = _train_data.shape[-1]\n    args.c_out = _train_data.shape[-1]\n\n    if args.is_training:\n        for ii in range(args.itr):\n            # setting record of experiments\n            setting = '{}_{}_{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_fc{}_eb{}_dt{}_{}_{}'.format(\n                args.task_name,\n                args.model_id,\n                args.model,\n                args.data,\n                args.features,\n                args.seq_len,\n                args.label_len,\n                args.pred_len,\n                args.d_model,\n                args.n_heads,\n                args.e_layers,\n                args.d_layers,\n                args.d_ff,\n                args.factor,\n                args.embed,\n                args.distil,\n                args.des, ii)\n\n            exp = Exp(args, _train_data, train_loader, _train_data, train_loader, _train_data, train_loader)  # set experiments\n            print('>>>>>>>start training : {}>>>>>>>>>>>>>>>>>>>>>>>>>>'.format(setting))\n            for i in range(train_data.shape[0]):\n                print(\"i = \", i, \", total num = \", 
train_data.shape[0])\n                print(\"train_data.shape = \", train_data.shape)\n                _train_data = train_data[i]\n                print(\"000train_data.shape = \", train_data.shape, type(train_data))\n                _train_data = np.array(_train_data)\n                print(\"111_train_data.shape = \", _train_data.shape, type(_train_data))\n\n                train_dataset = UniLoader(_train_data, args.win_size, 1)\n\n                train_loader = DataLoader(dataset=train_dataset,\n                                          batch_size=args.batch_size,\n                                          shuffle=True,\n                                          num_workers=2,\n                                          drop_last=True)\n                exp.train_loader = train_loader\n                exp.train_set = _train_data\n                exp.val_loader = train_loader\n                exp.val_set = _train_data\n                exp.train_uni(setting)\n\n            print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))\n            eval_res = exp.test_uni(setting, all_train_data, all_test_data, all_test_labels, all_test_timestamps, delay, args)\n            torch.cuda.empty_cache()\n\n            print(\"result_dict = \", eval_res)\n\n            eval_res['dataset'] = args.data + str(args.index)\n            import pandas as pd\n\n            # 转换字典为 DataFrame\n            df = pd.DataFrame([eval_res])\n            # 指定保存路径\n            save_path = args.save_dir + args.save_csv_name\n\n            # 转换字典为 DataFrame\n            df_new = pd.DataFrame([eval_res])\n\n            # 检查文件是否存在\n            if os.path.exists(save_path):\n                # 文件存在，读取现有数据\n                df_existing = pd.read_csv(save_path, index_col=0)\n                # 将新数据附加到现有数据框中\n                df_combined = pd.concat([df_existing, df_new], ignore_index=True)\n            else:\n                # 文件不存在，创建新的数据框\n                df_combined = df_new\n\n   
         # 保存 DataFrame 为 CSV 文件\n            df_combined.to_csv(save_path, index=True, index_label=\"id\")\n    else:\n        ii = 0\n        setting = '{}_{}_{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_fc{}_eb{}_dt{}_{}_{}'.format(\n            args.task_name,\n            args.model_id,\n            args.model,\n            args.data,\n            args.features,\n            args.seq_len,\n            args.label_len,\n            args.pred_len,\n            args.d_model,\n            args.n_heads,\n            args.e_layers,\n            args.d_layers,\n            args.d_ff,\n            args.factor,\n            args.embed,\n            args.distil,\n            args.des, ii)\n\n        exp = Exp(args)  # set experiments\n        print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))\n        exp.test(setting, test=1)\n        torch.cuda.empty_cache()\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/train_lstm_vae.py",
    "content": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\n\nimport torch\nimport numpy as np\nimport argparse\nimport os\nimport sys\nimport time\nimport datetime\nfrom lstm_vae import LSTM_VAE\nimport tasks\nimport datautils\nfrom utils import init_dl_program, name_with_datetime, pkl_save, data_dropout\n\ndef save_checkpoint_callback(\n    save_every=1,\n    unit='epoch'\n):\n    assert unit in ('epoch', 'iter')\n    def callback(model, loss):\n        n = model.n_epochs if unit == 'epoch' else model.n_iters\n        if n % save_every == 0:\n            model.save(f'{run_dir}/model_{n}.pkl')\n    return callback\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    # parser.add_argument('dataset', help='The dataset name')\n    # parser.add_argument('run_name', help='The folder name used to save model, output and evaluation metrics. This can be set to any word')\n    # parser.add_argument('--dataset', default='kpi', help='The dataset name, yahoo, kpi')\n    parser.add_argument('--dataset', default='kpi',\n                        help='The dataset name, yahoo, kpi')  ##  SMD, MSL, SMAP, PSM, SWAT, NIPS_TS_Swan, UCR, NIPS_TS_Water\n    parser.add_argument('--is_multi', default=False, help='The dataset name, yahoo, kpi')\n    parser.add_argument('--datapath', default='./datasets/', help='')\n    parser.add_argument('--index', type=int, default=143, help='')\n    parser.add_argument('--run_name', default='lstm-vae',\n                        help='The folder name used to save model, output and evaluation metrics. This can be set to any word')\n    parser.add_argument('--loader', type=str, default='anomaly', help='The data loader used to load the experimental data--anomaly')\n    parser.add_argument('--gpu', type=int, default=1, help='The gpu no. 
used for training and inference (defaults to 0)')\n    parser.add_argument('--batch_size', type=int, default=8, help='The batch size (defaults to 8)')\n    parser.add_argument('--lr', type=float, default=0.001, help='The learning rate (defaults to 0.001)')\n    parser.add_argument('--hidden_size', type=int, default=16, help='The units of the LSTM hidden layer.')\n    parser.add_argument('--hidden_dim', type=int, default=3, help='The dims of the hidden representation (z).')\n    parser.add_argument('--z_kld_weight', type=float, default=1)\n    parser.add_argument('--x_kld_weight', type=float, default=1)\n    parser.add_argument('--max-train-length', type=int, default=3000, help='For sequence with a length greater than <max_train_length>, it would be cropped into some sequences, each of which has a length less than <max_train_length> (defaults to 3000)')\n    parser.add_argument('--iters', type=int, default=None, help='The number of iterations')\n    parser.add_argument('--epochs', type=int, default=None, help='The number of epochs')\n    parser.add_argument('--save-every', type=int, default=None, help='Save the checkpoint every <save_every> iterations/epochs')\n    parser.add_argument('--seed', type=int, default=None, help='The random seed')\n    parser.add_argument('--max-threads', type=int, default=None, help='The maximum allowed number of threads used by this process')\n    parser.add_argument('--eval', default=True, help='Whether to perform evaluation after training')\n    parser.add_argument('--irregular', type=float, default=0, help='The ratio of missing observations (defaults to 0)')\n\n    parser.add_argument('--save_dir', type=str, default='/dev_data/lz/tsm_ptms_anomaly_detection/result/')\n    parser.add_argument('--save_csv_name', type=str, default='lstm_vae_uni_0723.csv')\n\n    args = parser.parse_args()\n    \n    print(\"Dataset:\", args.dataset)\n    print(\"Arguments:\", str(args))\n    \n    device = init_dl_program(args.gpu, seed=args.seed, 
max_threads=args.max_threads)\n    \n    print('Loading data... ', end='')\n    if args.loader == 'anomaly':\n        task_type = 'anomaly_detection'\n\n        if args.is_multi:\n            from datasets.data_loader import get_loader_segment\n\n            data_path = args.datapath + args.dataset + '/'\n            print(\"data_path = \", data_path)\n            _, train_data_loader = get_loader_segment(args.index, data_path, args.batch_size, win_size=100, step=100,\n                                                      mode='train',\n                                                      dataset=args.dataset)\n\n            all_train_data = train_data_loader.train\n            all_train_labels = None\n            all_train_timestamps = None\n            all_test_data = train_data_loader.test\n            all_test_labels = train_data_loader.test_labels\n            all_test_timestamps = None\n            delay = 5\n\n            print(\"all_train_data test_data, test_labels.shape = \", all_train_data.shape, all_test_data.shape,\n                  all_test_labels.shape)\n            train_data = np.expand_dims(all_train_data, axis=0)\n            print(\"train_data.shape = \", train_data.shape)\n            print(\"Read Success!!!\")\n        else:\n            all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay = datautils.load_anomaly(args.dataset)\n\n        # i = 1\n        # for k in all_test_data:\n        #     print(\"i = \", i, \", k = \", k)\n        #     print(\"all_train_data.shape = \", all_train_data[k].shape)\n        #     print(\"all_train_labels.shape = \", all_train_labels[k].shape)\n        #     print(\"all_train_timestamps.shape = \", all_train_timestamps[k].shape)\n        #     print(\"all_test_data.shape = \", all_test_data[k].shape)\n        #     print(\"all_test_labels.shape = \", all_test_labels[k].shape)\n        #     print(\"all_test_timestamps.shape = \", 
all_test_timestamps[k].shape)\n        #     i = i + 1\n            # if i > 2:\n            #     break\n            train_data = datautils.gen_ano_train_data(all_train_data)\n            print(\"train_data.shape = \", train_data.shape)\n    else:\n        raise ValueError(f\"Unknown loader {args.loader}.\")\n        \n        \n    if args.irregular > 0:\n        raise ValueError(f\"Task type {task_type} is not supported when irregular>0.\")\n    print('done')\n    \n    config = dict(\n        batch_size=args.batch_size,\n        lr=args.lr,\n        hidden_size=args.hidden_size,\n        hidden_dim=args.hidden_dim,\n        z_kld_weight=args.z_kld_weight,\n        x_kld_weight=args.x_kld_weight,\n        max_train_length=args.max_train_length\n    )\n    \n    if args.save_every is not None:\n        unit = 'epoch' if args.epochs is not None else 'iter'\n        config[f'after_{unit}_callback'] = save_checkpoint_callback(args.save_every, unit)\n\n    run_dir = 'training/' + args.dataset + '__' + name_with_datetime(args.run_name)\n    os.makedirs(run_dir, exist_ok=True)\n    \n    t = time.time()\n\n    print(\"train_data.shape = \", train_data.shape)\n    \n    model = LSTM_VAE(\n        input_dims=train_data.shape[-1],\n        device=device,\n        **config\n    )\n    loss_log = model.train(\n        train_data,\n        n_epochs=args.epochs,\n        n_iters=args.iters,\n        verbose=True\n    )\n    model.save(f'{run_dir}/model.pkl')\n\n    t = time.time() - t\n    print(f\"\\nTraining time: {datetime.timedelta(seconds=t)}\")\n    print(\"Training time(seconds): \", t)\n\n    if args.eval:\n        if task_type == 'anomaly_detection':\n            out, eval_res = model.evaluate(model, all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay, is_multi=args.is_multi)\n        else:\n            assert False\n        pkl_save(f'{run_dir}/out.pkl', out)\n        pkl_save(f'{run_dir}/eval_res.pkl', 
eval_res)\n        print('Evaluation result:', eval_res)\n\n        eval_res['dataset'] = args.dataset +  str(args.index)\n        import pandas as pd\n\n        # 转换字典为 DataFrame\n        df = pd.DataFrame([eval_res])\n        # 指定保存路径\n        save_path = args.save_dir + args.save_csv_name\n\n        # 转换字典为 DataFrame\n        df_new = pd.DataFrame([eval_res])\n\n        # 检查文件是否存在\n        if os.path.exists(save_path):\n            # 文件存在，读取现有数据\n            df_existing = pd.read_csv(save_path, index_col=0)\n            # 将新数据附加到现有数据框中\n            df_combined = pd.concat([df_existing, df_new], ignore_index=True)\n        else:\n            # 文件不存在，创建新的数据框\n            df_combined = df_new\n\n        # 保存 DataFrame 为 CSV 文件\n        df_combined.to_csv(save_path, index=True, index_label=\"id\")\n\n    print(\"Finished.\")\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/train_lstm_vae_multi.py",
    "content": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\nimport torch\nimport numpy as np\nimport argparse\nimport os\nimport sys\nimport time\nimport datetime\nfrom lstm_vae import LSTM_VAE\nimport tasks\nimport datautils\nfrom utils import init_dl_program, name_with_datetime, pkl_save, data_dropout\n\n\ndef save_checkpoint_callback(\n        save_every=1,\n        unit='epoch'\n):\n    assert unit in ('epoch', 'iter')\n\n    def callback(model, loss):\n        n = model.n_epochs if unit == 'epoch' else model.n_iters\n        if n % save_every == 0:\n            model.save(f'{run_dir}/model_{n}.pkl')\n\n    return callback\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    # parser.add_argument('dataset', help='The dataset name')\n    # parser.add_argument('run_name', help='The folder name used to save model, output and evaluation metrics. This can be set to any word')\n    # parser.add_argument('--dataset', default='kpi', help='The dataset name, yahoo, kpi')\n    parser.add_argument('--dataset', default='PSM',\n                        help='The dataset name, yahoo, kpi')  ##  SMD, MSL, SMAP, PSM, SWAT, NIPS_TS_Swan, UCR, NIPS_TS_Water\n    parser.add_argument('--is_multi', default=True, help='The dataset name, yahoo, kpi')\n    parser.add_argument('--datapath', default='./datasets/', help='')\n    parser.add_argument('--index', type=int, default=203, help='')  ##  [79, 108, 187, 203]\n    parser.add_argument('--run_name', default='lstm-vae',\n                        help='The folder name used to save model, output and evaluation metrics. This can be set to any word')\n    parser.add_argument('--loader', type=str, default='anomaly',\n                        help='The data loader used to load the experimental data--anomaly')\n    parser.add_argument('--gpu', type=int, default=1,\n                        help='The gpu no. 
used for training and inference (defaults to 0)')\n    parser.add_argument('--batch_size', type=int, default=8, help='The batch size (defaults to 8)')\n    parser.add_argument('--lr', type=float, default=0.001, help='The learning rate (defaults to 0.001)')\n    parser.add_argument('--hidden_size', type=int, default=16, help='The units of the LSTM hidden layer.')\n    parser.add_argument('--hidden_dim', type=int, default=3, help='The dims of the hidden representation (z).')\n    parser.add_argument('--z_kld_weight', type=float, default=1)\n    parser.add_argument('--x_kld_weight', type=float, default=1)\n    parser.add_argument('--max-train-length', type=int, default=3000,\n                        help='For sequence with a length greater than <max_train_length>, it would be cropped into some sequences, each of which has a length less than <max_train_length> (defaults to 3000)')\n    parser.add_argument('--iters', type=int, default=None, help='The number of iterations')\n    parser.add_argument('--epochs', type=int, default=None, help='The number of epochs')\n    parser.add_argument('--save-every', type=int, default=None,\n                        help='Save the checkpoint every <save_every> iterations/epochs')\n    parser.add_argument('--seed', type=int, default=None, help='The random seed')\n    parser.add_argument('--max-threads', type=int, default=None,\n                        help='The maximum allowed number of threads used by this process')\n    parser.add_argument('--eval', default=True, help='Whether to perform evaluation after training')\n    parser.add_argument('--irregular', type=float, default=0, help='The ratio of missing observations (defaults to 0)')\n\n    parser.add_argument('--save_dir', type=str, default='/dev_data/lz/tsm_ptms_anomaly_detection/result/')\n    parser.add_argument('--save_csv_name', type=str, default='lstm_vae_ucr_0727.csv')\n\n    args = parser.parse_args()\n\n    print(\"Dataset:\", args.dataset)\n    print(\"Arguments:\", 
str(args))\n\n    # 检查路径是否存在，如果不存在则赋值为新的路径\n    if not os.path.exists(args.save_dir):\n        args.save_dir = '/SSD/lz/tsm_ptms_anomaly_detection/result/'\n\n    print(\"save_dir = \", args.save_dir)  # 输出检查\n\n    device = init_dl_program(args.gpu, seed=args.seed, max_threads=args.max_threads)\n\n    print('Loading data... ', end='')\n    if args.loader == 'anomaly':\n        task_type = 'anomaly_detection'\n\n        if args.is_multi:\n            from datasets.data_loader import get_loader_segment\n\n            data_path = args.datapath + args.dataset + '/'\n            print(\"data_path = \", data_path)\n            _, train_data_loader = get_loader_segment(args.index, data_path, args.batch_size, win_size=100, step=100,\n                                                      mode='train',\n                                                      dataset=args.dataset)\n\n            all_train_data = train_data_loader.train\n            all_train_labels = None\n            all_train_timestamps = None\n            all_test_data = train_data_loader.test\n            all_test_labels = train_data_loader.test_labels\n            all_test_timestamps = None\n            delay = 5\n\n            print(\"all_train_data test_data, test_labels.shape = \", all_train_data.shape, all_test_data.shape,\n                  all_test_labels.shape)\n            train_data = np.expand_dims(all_train_data, axis=0)\n            print(\"train_data.shape = \", train_data.shape)\n            print(\"Read Success!!!\")\n        else:\n            all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay = datautils.load_anomaly(\n                args.dataset)\n\n            # i = 1\n            # for k in all_test_data:\n            #     print(\"i = \", i, \", k = \", k)\n            #     print(\"all_train_data.shape = \", all_train_data[k].shape)\n            #     print(\"all_train_labels.shape = \", all_train_labels[k].shape)\n     
       #     print(\"all_train_timestamps.shape = \", all_train_timestamps[k].shape)\n            #     print(\"all_test_data.shape = \", all_test_data[k].shape)\n            #     print(\"all_test_labels.shape = \", all_test_labels[k].shape)\n            #     print(\"all_test_timestamps.shape = \", all_test_timestamps[k].shape)\n            #     i = i + 1\n            # if i > 2:\n            #     break\n            train_data = datautils.gen_ano_train_data(all_train_data)\n            print(\"train_data.shape = \", train_data.shape)\n    else:\n        raise ValueError(f\"Unknown loader {args.loader}.\")\n\n    if args.irregular > 0:\n        raise ValueError(f\"Task type {task_type} is not supported when irregular>0.\")\n    print('done')\n\n    config = dict(\n        batch_size=args.batch_size,\n        lr=args.lr,\n        hidden_size=args.hidden_size,\n        hidden_dim=args.hidden_dim,\n        z_kld_weight=args.z_kld_weight,\n        x_kld_weight=args.x_kld_weight,\n        max_train_length=args.max_train_length\n    )\n\n    if args.save_every is not None:\n        unit = 'epoch' if args.epochs is not None else 'iter'\n        config[f'after_{unit}_callback'] = save_checkpoint_callback(args.save_every, unit)\n\n    run_dir = 'training/' + args.dataset + '__' + name_with_datetime(args.run_name)\n    os.makedirs(run_dir, exist_ok=True)\n\n    t = time.time()\n\n    print(\"train_data.shape = \", train_data.shape)\n\n    model = LSTM_VAE(\n        input_dims=train_data.shape[-1],\n        device=device,\n        **config\n    )\n    loss_log = model.train(\n        train_data,\n        n_epochs=args.epochs,\n        n_iters=args.iters,\n        verbose=True\n    )\n    model.save(f'{run_dir}/model.pkl')\n\n    t = time.time() - t\n    print(f\"\\nTraining time: {datetime.timedelta(seconds=t)}\")\n    print(\"Training time(seconds): \", t)\n\n    if args.eval:\n        if task_type == 'anomaly_detection':\n            out, eval_res = model.evaluate(model, 
all_train_data, all_train_labels, all_train_timestamps, all_test_data,\n                                           all_test_labels, all_test_timestamps, delay, is_multi=args.is_multi, ucr_index=args.index)\n        else:\n            assert False\n        pkl_save(f'{run_dir}/out.pkl', out)\n        pkl_save(f'{run_dir}/eval_res.pkl', eval_res)\n        print('Evaluation result:', eval_res)\n\n        eval_res['dataset'] = args.dataset + str(args.index)\n        import pandas as pd\n\n        # 转换字典为 DataFrame\n        df = pd.DataFrame([eval_res])\n        # 指定保存路径\n        save_path = args.save_dir + args.save_csv_name\n\n        # 转换字典为 DataFrame\n        df_new = pd.DataFrame([eval_res])\n\n        # 检查文件是否存在\n        if os.path.exists(save_path):\n            # 文件存在，读取现有数据\n            df_existing = pd.read_csv(save_path, index_col=0)\n            # 将新数据附加到现有数据框中\n            df_combined = pd.concat([df_existing, df_new], ignore_index=True)\n        else:\n            # 文件不存在，创建新的数据框\n            df_combined = df_new\n\n        # 保存 DataFrame 为 CSV 文件\n        df_combined.to_csv(save_path, index=True, index_label=\"id\")\n\n    print(\"Finished.\")\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/train_spot.py",
    "content": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom spot import SPOT\nimport numpy as np\nimport time\nimport datetime\nimport datautils\nfrom sklearn.metrics import f1_score, precision_score, recall_score\nimport argparse\nfrom sklearn.metrics import f1_score, precision_score, recall_score\nfrom other_anomaly_baselines.metrics.affiliation.metrics import pr_from_events\nfrom other_anomaly_baselines.metrics.vus.metrics import get_range_vus_roc\nfrom other_anomaly_baselines.metrics.affiliation.generics import convert_vector_to_events\nfrom tadpak import evaluate\n\n\ndef adjustment(gt, pred):\n    anomaly_state = False\n    for i in range(len(gt)):\n        if gt[i] == 1 and pred[i] == 1 and not anomaly_state:\n            anomaly_state = True\n            for j in range(i, 0, -1):\n                if gt[j] == 0:\n                    break\n                else:\n                    if pred[j] == 0:\n                        pred[j] = 1\n            for j in range(i, len(gt)):\n                if gt[j] == 0:\n                    break\n                else:\n                    if pred[j] == 0:\n                        pred[j] = 1\n        elif gt[i] == 0:\n            anomaly_state = False\n        if anomaly_state:\n            pred[i] = 1\n    return gt, pred\n\n# consider delay threshold and missing segments\ndef get_range_proba(predict, label, delay=7):\n    splits = np.where(label[1:] != label[:-1])[0] + 1\n    is_anomaly = label[0] == 1\n    new_predict = np.array(predict)\n    pos = 0\n\n    for sp in splits:\n        if is_anomaly:\n            if 1 in predict[pos:min(pos + delay + 1, sp)]:\n                new_predict[pos: sp] = 1\n            else:\n                new_predict[pos: sp] = 0\n        is_anomaly = not is_anomaly\n        pos = sp\n    sp = len(label)\n\n    if is_anomaly:  # 
anomaly in the end\n        if 1 in predict[pos: min(pos + delay + 1, sp)]:\n            new_predict[pos: sp] = 1\n        else:\n            new_predict[pos: sp] = 0\n\n    return new_predict\n\n\nparser = argparse.ArgumentParser()\n# parser.add_argument('dataset', help='The dataset name')\n# parser.add_argument('run_name', help='The folder name used to save model, output and evaluation metrics. This can be set to any word')\n# parser.add_argument('--dataset', default='kpi', help='The dataset name, yahoo, kpi')\nparser.add_argument('--dataset', default='kpi',\n                help='The dataset name, yahoo, kpi')  ##  SMD, MSL, SMAP, PSM, SWAT, NIPS_TS_Swan, UCR, NIPS_TS_Water\nparser.add_argument('--is_multi', type=bool, default=False, help='The dataset name, yahoo, kpi')\nparser.add_argument('--datapath', default='./datasets/', help='')\nparser.add_argument('--index', type=int, default=143, help='')\nparser.add_argument('--gpu', type=int, default=0, help='The gpu no. used for training and inference (defaults to 0)')\nparser.add_argument('--batch_size', type=int, default=8, help='The batch size (defaults to 8)')\nparser.add_argument('--save_dir', type=str, default='/dev_data/lz/tsm_ptms_anomaly_detection/result/')\nparser.add_argument('--save_csv_name', type=str, default='spot_0715.csv')\n\nargs = parser.parse_args()\n\nprint(\"Dataset:\", args.dataset)\nprint(\"Arguments:\", str(args))\n\nif args.is_multi:\n    from datasets.data_loader import get_loader_segment\n\n    data_path = args.datapath + args.dataset + '/'\n    print(\"data_path = \", data_path)\n    _, train_data_loader = get_loader_segment(args.index, data_path, args.batch_size, win_size=100, step=100,\n                                              mode='train',\n                                              dataset=args.dataset)\n\n    all_train_data = train_data_loader.train\n    all_train_labels = None\n    all_train_timestamps = None\n    all_test_data = train_data_loader.test\n    all_test_labels 
= train_data_loader.test_labels\n    all_test_timestamps = None\n    delay = 5\n\n    all_train_data = np.squeeze(all_train_data)\n    all_test_data = np.squeeze(all_test_data)\n\n    print(\"all_train_data test_data, test_labels.shape = \", all_train_data.shape, all_test_data.shape,\n          all_test_labels.shape)\n    # train_data = np.expand_dims(all_train_data, axis=0)\n    # print(\"train_data.shape = \", train_data.shape)\n    print(\"Read Success!!!\")\n\nelse:\n\n    # dataset = 'kpi' # yahoo, kpi\n    print('Loading data... ', end='')\n    all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay = datautils.load_anomaly(args.dataset)\n\n\n\nlabels = []\npred = []\nscores = []\n\nif args.is_multi:\n    train_data = all_train_data  # initial batch\n    train_labels = all_train_labels\n    train_timestamps = all_train_timestamps\n\n    test_data = all_test_data  # stream\n    test_labels = all_test_labels\n    test_timestamps = all_test_timestamps\n    # print(\"k = \", k, \", test_data.shape = \", test_data.shape, test_labels.shape)\n    q = 1e-3  # risk parameter\n    s = SPOT(q)  # SPOT object\n    s.fit(train_data, test_data)  # data import\n    s.initialize()  # initialization step\n    results = s.run()  # run\n    # print()\n    test_thresholds = results['thresholds']\n    idx_anoamly = results['alarms']\n\n    print(\"test_thresholds = \", test_thresholds[:10])\n    print(\"idx_anoamly = \", idx_anoamly[:10])\n    print(\"scores = \", results['scores'][:10])\n\n    # scores = results['scores']\n\n    test_pred = np.zeros(len(test_thresholds))\n    test_pred[idx_anoamly] = 1\n\n    test_pred = get_range_proba(test_pred, test_labels, delay)\n\n    labels.append(test_labels)\n    pred.append(test_pred)\n    scores.append(results['scores'])\nelse:\n    for k in all_test_data:\n        train_data = all_train_data[k] # initial batch\n        train_labels = all_train_labels[k]\n        
train_timestamps = all_train_timestamps[k]\n\n        test_data = all_test_data[k] # stream\n        test_labels = all_test_labels[k]\n        test_timestamps = all_test_timestamps[k]\n        # print(\"k = \", k, \", test_data.shape = \", test_data.shape, test_labels.shape)\n        q = 1e-3  \t\t\t# risk parameter\n        s = SPOT(q)  \t\t# SPOT object\n        s.fit(train_data, test_data) \t# data import\n        s.initialize() \t\t# initialization step\n        results = s.run() \t# run\n        # print()\n        test_thresholds = results['thresholds']\n        idx_anoamly = results['alarms']\n\n        test_pred = np.zeros(len(test_thresholds))\n        test_pred[idx_anoamly] = 1\n\n        test_pred = get_range_proba(test_pred, test_labels, delay)\n\n        labels.append(test_labels)\n        pred.append(test_pred)\n        scores.append(results['scores'])\n\nlabels = np.concatenate(labels)\npred = np.concatenate(pred)\nscores = np.concatenate(scores)\n\nif args.is_multi:\n    # labels = np.asarray(labels_log, np.int64)[0]\n    # pred = np.asarray(res_log, np.int64)[0]\n    # print(\"labels.shape = \", labels.shape, labels[:5])\n    # print(\"pred.shape = \", pred.shape, pred[:5])\n\n\n\n    events_pred = convert_vector_to_events(pred)\n    events_gt = convert_vector_to_events(labels)\n\n    Trange = (0, len(labels))\n    affiliation = pr_from_events(events_pred, events_gt, Trange)\n    vus_results = get_range_vus_roc(labels, pred, 100)  # default slidingWindow = 100\n\n    pred_scores = scores\n    results_f1_pa_k_10 = evaluate.evaluate(pred_scores, labels, k=10)\n    # results_f1_pa_k_30 = evaluate.evaluate(pred, labels, k=30)\n    results_f1_pa_k_50 = evaluate.evaluate(pred_scores, labels, k=50)\n    results_f1_pa_k_90 = evaluate.evaluate(pred_scores, labels, k=90)\n\n    labels, pred = adjustment(labels, pred)\n\n    eval_res = {\n        'f1': f1_score(labels, pred),\n        'precision': precision_score(labels, pred),\n        'recall': 
recall_score(labels, pred),\n        \"Affiliation precision\": affiliation['precision'],\n        \"Affiliation recall\": affiliation['recall'],\n        \"R_AUC_ROC\": vus_results[\"R_AUC_ROC\"],\n        \"R_AUC_PR\": vus_results[\"R_AUC_PR\"],\n        \"VUS_ROC\": vus_results[\"VUS_ROC\"],\n        \"VUS_PR\": vus_results[\"VUS_PR\"],\n        'f1_pa_10': results_f1_pa_k_10['best_f1_w_pa'],\n        'f1_pa_50': results_f1_pa_k_50['best_f1_w_pa'],\n        'f1_pa_90': results_f1_pa_k_90['best_f1_w_pa'],\n    }\n    print(\"eval_res = \", eval_res)\nelse:\n\n    print('\\nf1:', f1_score(labels, pred))\n    print('precision:', precision_score(labels, pred))\n    print('recall:', recall_score(labels, pred))\n\n    events_pred = convert_vector_to_events(pred)\n    events_gt = convert_vector_to_events(labels)\n\n    Trange = (0, len(labels))\n    affiliation = pr_from_events(events_pred, events_gt, Trange)\n    vus_results = get_range_vus_roc(labels, pred, 100)  # default slidingWindow = 100\n\n    eval_res = {\n        'f1': f1_score(labels, pred),\n        'precision': precision_score(labels, pred),\n        'recall': recall_score(labels, pred),\n        \"Affiliation precision\": affiliation['precision'],\n        \"Affiliation recall\": affiliation['recall'],\n        \"R_AUC_ROC\": vus_results[\"R_AUC_ROC\"],\n        \"R_AUC_PR\": vus_results[\"R_AUC_PR\"],\n        \"VUS_ROC\": vus_results[\"VUS_ROC\"],\n        \"VUS_PR\": vus_results[\"VUS_PR\"]\n    }\n\n    results_f1_pa_k_10 = evaluate.evaluate(scores, labels, k=10)\n    # results_f1_pa_k_30 = evaluate.evaluate(pred, labels, k=30)\n    results_f1_pa_k_50 = evaluate.evaluate(scores, labels, k=50)\n    results_f1_pa_k_90 = evaluate.evaluate(scores, labels, k=90)\n\n    eval_res['f1_pa_10'] = results_f1_pa_k_10['best_f1_w_pa']\n    eval_res['f1_pa_50'] = results_f1_pa_k_50['best_f1_w_pa']\n    eval_res['f1_pa_90'] = results_f1_pa_k_90['best_f1_w_pa']\n\n\neval_res['dataset'] = args.dataset + 
str(args.index)\nimport pandas as pd\nimport os\n\n# 转换字典为 DataFrame\ndf = pd.DataFrame([eval_res])\n# 指定保存路径\nsave_path = args.save_dir + args.save_csv_name\n\n# 转换字典为 DataFrame\ndf_new = pd.DataFrame([eval_res])\n\n# 检查文件是否存在\nif os.path.exists(save_path):\n    # 文件存在，读取现有数据\n    df_existing = pd.read_csv(save_path, index_col=0)\n    # 将新数据附加到现有数据框中\n    df_combined = pd.concat([df_existing, df_new], ignore_index=True)\nelse:\n    # 文件不存在，创建新的数据框\n    df_combined = df_new\n\n# 保存 DataFrame 为 CSV 文件\ndf_combined.to_csv(save_path, index=True, index_label=\"id\")\n\nprint(\"Finished.\")"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/train_spot_multi.py",
    "content": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom spot import SPOT\nimport numpy as np\nimport time\nimport datetime\nimport datautils\nfrom sklearn.metrics import f1_score, precision_score, recall_score\nimport argparse\nfrom sklearn.metrics import f1_score, precision_score, recall_score\nfrom other_anomaly_baselines.metrics.affiliation.metrics import pr_from_events\nfrom other_anomaly_baselines.metrics.vus.metrics import get_range_vus_roc\nfrom other_anomaly_baselines.metrics.affiliation.generics import convert_vector_to_events\nfrom tadpak import evaluate\n\n\ndef adjustment(gt, pred):\n    anomaly_state = False\n    for i in range(len(gt)):\n        if gt[i] == 1 and pred[i] == 1 and not anomaly_state:\n            anomaly_state = True\n            for j in range(i, 0, -1):\n                if gt[j] == 0:\n                    break\n                else:\n                    if pred[j] == 0:\n                        pred[j] = 1\n            for j in range(i, len(gt)):\n                if gt[j] == 0:\n                    break\n                else:\n                    if pred[j] == 0:\n                        pred[j] = 1\n        elif gt[i] == 0:\n            anomaly_state = False\n        if anomaly_state:\n            pred[i] = 1\n    return gt, pred\n\n# consider delay threshold and missing segments\ndef get_range_proba(predict, label, delay=7):\n    splits = np.where(label[1:] != label[:-1])[0] + 1\n    is_anomaly = label[0] == 1\n    new_predict = np.array(predict)\n    pos = 0\n\n    for sp in splits:\n        if is_anomaly:\n            if 1 in predict[pos:min(pos + delay + 1, sp)]:\n                new_predict[pos: sp] = 1\n            else:\n                new_predict[pos: sp] = 0\n        is_anomaly = not is_anomaly\n        pos = sp\n    sp = len(label)\n\n    if is_anomaly:  # 
anomaly in the end\n        if 1 in predict[pos: min(pos + delay + 1, sp)]:\n            new_predict[pos: sp] = 1\n        else:\n            new_predict[pos: sp] = 0\n\n    return new_predict\n\n\nparser = argparse.ArgumentParser()\n# parser.add_argument('dataset', help='The dataset name')\n# parser.add_argument('run_name', help='The folder name used to save model, output and evaluation metrics. This can be set to any word')\n# parser.add_argument('--dataset', default='kpi', help='The dataset name, yahoo, kpi')\nparser.add_argument('--dataset', default='UCR',\n                help='The dataset name, yahoo, kpi')  ##  SMD, MSL, SMAP, PSM, SWAT, NIPS_TS_Swan, UCR, NIPS_TS_Water\nparser.add_argument('--is_multi', type=bool, default=True, help='The dataset name, yahoo, kpi')\nparser.add_argument('--datapath', default='./datasets/', help='')\nparser.add_argument('--index', type=int, default=241, help='')   ## 79, 108, 187, 203, 239, 240, 241]\nparser.add_argument('--gpu', type=int, default=0, help='The gpu no. 
used for training and inference (defaults to 0)')\nparser.add_argument('--batch_size', type=int, default=8, help='The batch size (defaults to 8)')\nparser.add_argument('--save_dir', type=str, default='/dev_data/lz/tsm_ptms_anomaly_detection/result/')\nparser.add_argument('--save_csv_name', type=str, default='spot_ucr_0727.csv')\n\nargs = parser.parse_args()\n\nprint(\"Dataset:\", args.dataset)\nprint(\"Arguments:\", str(args))\n\nif args.is_multi:\n    from datasets.data_loader import get_loader_segment\n\n    data_path = args.datapath + args.dataset + '/'\n    print(\"data_path = \", data_path)\n    _, train_data_loader = get_loader_segment(args.index, data_path, args.batch_size, win_size=100, step=100,\n                                              mode='train',\n                                              dataset=args.dataset)\n\n    all_train_data = train_data_loader.train\n    all_train_labels = None\n    all_train_timestamps = None\n    all_test_data = train_data_loader.test\n    all_test_labels = train_data_loader.test_labels\n    all_test_timestamps = None\n    delay = 5\n\n    all_train_data = np.squeeze(all_train_data)\n    all_test_data = np.squeeze(all_test_data)\n\n    print(\"all_train_data test_data, test_labels.shape = \", all_train_data.shape, all_test_data.shape,\n          all_test_labels.shape)\n    # train_data = np.expand_dims(all_train_data, axis=0)\n    # print(\"train_data.shape = \", train_data.shape)\n    print(\"Read Success!!!\")\n\nelse:\n\n    # dataset = 'kpi' # yahoo, kpi\n    print('Loading data... 
', end='')\n    all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay = datautils.load_anomaly(args.dataset)\n\n\n\nlabels = []\npred = []\nscores = []\n\nif args.is_multi:\n    train_data = all_train_data  # initial batch\n    train_labels = all_train_labels\n    train_timestamps = all_train_timestamps\n\n    test_data = all_test_data  # stream\n    test_labels = all_test_labels\n    test_timestamps = all_test_timestamps\n    # print(\"k = \", k, \", test_data.shape = \", test_data.shape, test_labels.shape)\n    q = 1e-3  # risk parameter\n    s = SPOT(q)  # SPOT object\n    s.fit(train_data, test_data)  # data import\n    s.initialize()  # initialization step\n    results = s.run()  # run\n    # print()\n    test_thresholds = results['thresholds']\n    idx_anoamly = results['alarms']\n\n    print(\"test_thresholds = \", test_thresholds[:10])\n    print(\"idx_anoamly = \", idx_anoamly[:10])\n    print(\"scores = \", results['scores'][:10])\n\n    # scores = results['scores']\n\n    test_pred = np.zeros(len(test_thresholds))\n    test_pred[idx_anoamly] = 1\n\n    test_pred = get_range_proba(test_pred, test_labels, delay)\n\n    labels.append(test_labels)\n    pred.append(test_pred)\n    scores.append(results['scores'])\nelse:\n    for k in all_test_data:\n        train_data = all_train_data[k] # initial batch\n        train_labels = all_train_labels[k]\n        train_timestamps = all_train_timestamps[k]\n\n        test_data = all_test_data[k] # stream\n        test_labels = all_test_labels[k]\n        test_timestamps = all_test_timestamps[k]\n        # print(\"k = \", k, \", test_data.shape = \", test_data.shape, test_labels.shape)\n        q = 1e-3  \t\t\t# risk parameter\n        s = SPOT(q)  \t\t# SPOT object\n        s.fit(train_data, test_data) \t# data import\n        s.initialize() \t\t# initialization step\n        results = s.run() \t# run\n        # print()\n        test_thresholds = 
results['thresholds']\n        idx_anoamly = results['alarms']\n\n        test_pred = np.zeros(len(test_thresholds))\n        test_pred[idx_anoamly] = 1\n\n        test_pred = get_range_proba(test_pred, test_labels, delay)\n\n        labels.append(test_labels)\n        pred.append(test_pred)\n        scores.append(results['scores'])\n\nlabels = np.concatenate(labels)\npred = np.concatenate(pred)\nscores = np.concatenate(scores)\n\nif args.is_multi:\n    # labels = np.asarray(labels_log, np.int64)[0]\n    # pred = np.asarray(res_log, np.int64)[0]\n    # print(\"labels.shape = \", labels.shape, labels[:5])\n    # print(\"pred.shape = \", pred.shape, pred[:5])\n\n    if args.index == 79 or args.index == 108 or args.index == 187 or args.index == 203:\n        labels, pred = adjustment(labels, pred)\n        eval_res = {\n            'f1': f1_score(labels, pred),\n            'precision': precision_score(labels, pred),\n            'recall': recall_score(labels, pred),\n            \"Affiliation precision\": None,\n            \"Affiliation recall\": None,\n            \"R_AUC_ROC\": None,\n            \"R_AUC_PR\": None,\n            \"VUS_ROC\": None,\n            \"VUS_PR\": None,\n            'f1_pa_10': None,\n            'f1_pa_50': None,\n            'f1_pa_90': None,\n        }\n    else:\n\n        events_pred = convert_vector_to_events(pred)\n        events_gt = convert_vector_to_events(labels)\n\n        Trange = (0, len(labels))\n        affiliation = pr_from_events(events_pred, events_gt, Trange)\n        vus_results = get_range_vus_roc(labels, pred, 100)  # default slidingWindow = 100\n\n        pred_scores = scores\n        if args.index == 239 or args.index == 240:\n            labels, pred = adjustment(labels, pred)\n\n            eval_res = {\n                'f1': f1_score(labels, pred),\n                'precision': precision_score(labels, pred),\n                'recall': recall_score(labels, pred),\n                \"Affiliation precision\": 
affiliation['precision'],\n                \"Affiliation recall\": affiliation['recall'],\n                \"R_AUC_ROC\": vus_results[\"R_AUC_ROC\"],\n                \"R_AUC_PR\": vus_results[\"R_AUC_PR\"],\n                \"VUS_ROC\": vus_results[\"VUS_ROC\"],\n                \"VUS_PR\": vus_results[\"VUS_PR\"],\n                'f1_pa_10': None,\n                'f1_pa_50': None,\n                'f1_pa_90': None,\n            }\n        else:\n            results_f1_pa_k_10 = evaluate.evaluate(pred_scores, labels, k=10)\n            # results_f1_pa_k_30 = evaluate.evaluate(pred, labels, k=30)\n            results_f1_pa_k_50 = evaluate.evaluate(pred_scores, labels, k=50)\n            results_f1_pa_k_90 = evaluate.evaluate(pred_scores, labels, k=90)\n\n            labels, pred = adjustment(labels, pred)\n\n            eval_res = {\n                'f1': f1_score(labels, pred),\n                'precision': precision_score(labels, pred),\n                'recall': recall_score(labels, pred),\n                \"Affiliation precision\": affiliation['precision'],\n                \"Affiliation recall\": affiliation['recall'],\n                \"R_AUC_ROC\": vus_results[\"R_AUC_ROC\"],\n                \"R_AUC_PR\": vus_results[\"R_AUC_PR\"],\n                \"VUS_ROC\": vus_results[\"VUS_ROC\"],\n                \"VUS_PR\": vus_results[\"VUS_PR\"],\n                'f1_pa_10': results_f1_pa_k_10['best_f1_w_pa'],\n                'f1_pa_50': results_f1_pa_k_50['best_f1_w_pa'],\n                'f1_pa_90': results_f1_pa_k_90['best_f1_w_pa'],\n            }\n    print(\"eval_res = \", eval_res)\nelse:\n\n    print('\\nf1:', f1_score(labels, pred))\n    print('precision:', precision_score(labels, pred))\n    print('recall:', recall_score(labels, pred))\n\n    events_pred = convert_vector_to_events(pred)\n    events_gt = convert_vector_to_events(labels)\n\n    Trange = (0, len(labels))\n    affiliation = pr_from_events(events_pred, events_gt, Trange)\n    vus_results = 
get_range_vus_roc(labels, pred, 100)  # default slidingWindow = 100\n\n    eval_res = {\n        'f1': f1_score(labels, pred),\n        'precision': precision_score(labels, pred),\n        'recall': recall_score(labels, pred),\n        \"Affiliation precision\": affiliation['precision'],\n        \"Affiliation recall\": affiliation['recall'],\n        \"R_AUC_ROC\": vus_results[\"R_AUC_ROC\"],\n        \"R_AUC_PR\": vus_results[\"R_AUC_PR\"],\n        \"VUS_ROC\": vus_results[\"VUS_ROC\"],\n        \"VUS_PR\": vus_results[\"VUS_PR\"]\n    }\n\n    results_f1_pa_k_10 = evaluate.evaluate(scores, labels, k=10)\n    # results_f1_pa_k_30 = evaluate.evaluate(pred, labels, k=30)\n    results_f1_pa_k_50 = evaluate.evaluate(scores, labels, k=50)\n    results_f1_pa_k_90 = evaluate.evaluate(scores, labels, k=90)\n\n    eval_res['f1_pa_10'] = results_f1_pa_k_10['best_f1_w_pa']\n    eval_res['f1_pa_50'] = results_f1_pa_k_50['best_f1_w_pa']\n    eval_res['f1_pa_90'] = results_f1_pa_k_90['best_f1_w_pa']\n\n\neval_res['dataset'] = args.dataset + str(args.index)\nimport pandas as pd\nimport os\n\n# 转换字典为 DataFrame\ndf = pd.DataFrame([eval_res])\n# 指定保存路径\nsave_path = args.save_dir + args.save_csv_name\n\n# 转换字典为 DataFrame\ndf_new = pd.DataFrame([eval_res])\n\n# 检查文件是否存在\nif os.path.exists(save_path):\n    # 文件存在，读取现有数据\n    df_existing = pd.read_csv(save_path, index_col=0)\n    # 将新数据附加到现有数据框中\n    df_combined = pd.concat([df_existing, df_new], ignore_index=True)\nelse:\n    # 文件不存在，创建新的数据框\n    df_combined = df_new\n\n# 保存 DataFrame 为 CSV 文件\ndf_combined.to_csv(save_path, index=True, index_label=\"id\")\n\nprint(\"Finished.\")"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/train_timesnet.py",
    "content": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\nimport argparse\nimport os\nimport torch\n\nfrom other_anomaly_baselines.exp_anomaly_detection import Exp_Anomaly_Detection\nfrom other_anomaly_baselines.datasets.data_loader import get_loader_segment\n\nimport random\nimport numpy as np\n\nif __name__ == '__main__':\n    fix_seed = 42\n    random.seed(fix_seed)\n    torch.manual_seed(fix_seed)\n    np.random.seed(fix_seed)\n\n    parser = argparse.ArgumentParser(description='TimesNet')\n\n    # basic config\n    parser.add_argument('--task_name', type=str, default='anomaly_detection',\n                        help='task name, options:[long_term_forecast, short_term_forecast, imputation, classification, anomaly_detection]')\n    parser.add_argument('--is_training', type=int, default=1, help='status')\n    parser.add_argument('--model_id', type=str, default='test', help='model id')\n    parser.add_argument('--model', type=str, default='TimesNet',\n                        help='model name, options: [Autoformer, Transformer, TimesNet]')\n\n    # data loader\n    parser.add_argument('--data', type=str, default='UCR', help='dataset type')   ##  SMD, MSL, SMAP, PSM, SWAT, NIPS_TS_Swan, UCR, NIPS_TS_Water\n    parser.add_argument('--root_path', type=str, default='./data/ETT/', help='root path of the data file')\n    # parser.add_argument('--data_path', type=str, default='ETTh1.csv', help='data file')\n    parser.add_argument('--features', type=str, default='M',\n                        help='forecasting task, options:[M, S, MS]; M:multivariate predict multivariate, S:univariate predict univariate, MS:multivariate predict univariate')\n    parser.add_argument('--target', type=str, default='OT', help='target feature in S or MS task')\n    parser.add_argument('--freq', type=str, default='h',\n                        help='freq for time features encoding, 
options:[s:secondly, t:minutely, h:hourly, d:daily, b:business days, w:weekly, m:monthly], you can also use more detailed freq like 15min or 3h')\n    parser.add_argument('--checkpoints', type=str, default='./checkpoints/', help='location of model checkpoints')\n\n    # forecasting task\n    parser.add_argument('--seq_len', type=int, default=100, help='input sequence length')\n    parser.add_argument('--label_len', type=int, default=48, help='start token length')\n    parser.add_argument('--pred_len', type=int, default=0, help='prediction sequence length')\n    parser.add_argument('--seasonal_patterns', type=str, default='Monthly', help='subset for M4')\n    parser.add_argument('--inverse', action='store_true', help='inverse output data', default=False)\n\n    # inputation task\n    parser.add_argument('--mask_rate', type=float, default=0.25, help='mask ratio')\n\n    # anomaly detection task\n    parser.add_argument('--anomaly_ratio', type=float, default=0.5, help='prior anomaly ratio (%)')\n\n    # model define\n    parser.add_argument('--top_k', type=int, default=3, help='for TimesBlock')\n    parser.add_argument('--num_kernels', type=int, default=6, help='for Inception')\n    parser.add_argument('--enc_in', type=int, default=1, help='encoder input size') ## 55 for MSL, 38 for SMD, SMAP for 25, PSM for 25, SWAT for 51, NIPS_TS_Swan for 38,\n    parser.add_argument('--dec_in', type=int, default=7, help='decoder input size')  ## NIPS_TS_Water for 38, UCR for 1\n    parser.add_argument('--c_out', type=int, default=1, help='output size')\n    parser.add_argument('--d_model', type=int, default=8, help='dimension of model')\n    parser.add_argument('--n_heads', type=int, default=8, help='num of heads')\n    parser.add_argument('--e_layers', type=int, default=1, help='num of encoder layers')\n    parser.add_argument('--d_layers', type=int, default=1, help='num of decoder layers')\n    parser.add_argument('--d_ff', type=int, default=16, help='dimension of fcn')\n    
parser.add_argument('--moving_avg', type=int, default=25, help='window size of moving average')\n    parser.add_argument('--factor', type=int, default=1, help='attn factor')\n    parser.add_argument('--distil', action='store_false',\n                        help='whether to use distilling in encoder, using this argument means not using distilling',\n                        default=True)\n    parser.add_argument('--dropout', type=float, default=0.1, help='dropout')\n    parser.add_argument('--embed', type=str, default='timeF',\n                        help='time features encoding, options:[timeF, fixed, learned]')\n    parser.add_argument('--activation', type=str, default='gelu', help='activation')\n    parser.add_argument('--output_attention', action='store_true', help='whether to output attention in ecoder')\n\n    # optimization\n    parser.add_argument('--num_workers', type=int, default=10, help='data loader num workers')\n    parser.add_argument('--itr', type=int, default=1, help='experiments times')\n    parser.add_argument('--train_epochs', type=int, default=3, help='train epochs')\n    parser.add_argument('--batch_size', type=int, default=32, help='batch size of train input data')\n    parser.add_argument('--patience', type=int, default=3, help='early stopping patience')\n    parser.add_argument('--learning_rate', type=float, default=0.0001, help='optimizer learning rate')\n    parser.add_argument('--des', type=str, default='test', help='exp description')\n    parser.add_argument('--loss', type=str, default='MSE', help='loss function')\n    parser.add_argument('--lradj', type=str, default='type1', help='adjust learning rate')\n    parser.add_argument('--use_amp', action='store_true', help='use automatic mixed precision training', default=False)\n\n    # GPU\n    parser.add_argument('--use_gpu', type=bool, default=True, help='use gpu')\n    parser.add_argument('--gpu', type=int, default=0, help='gpu')\n    parser.add_argument('--use_multi_gpu', 
action='store_true', help='use multiple gpus', default=False)\n    parser.add_argument('--devices', type=str, default='0,1,2,3', help='device ids of multile gpus')\n\n    # de-stationary projector params\n    parser.add_argument('--p_hidden_dims', type=int, nargs='+', default=[128, 128],\n                        help='hidden layer dimensions of projector (List)')\n    parser.add_argument('--p_hidden_layers', type=int, default=2, help='number of hidden layers in projector')\n\n    # Default\n    parser.add_argument('--index', type=int, default=137)\n    parser.add_argument('--data_path', type=str, default='datasets/')\n    parser.add_argument('--win_size', type=int, default=100)\n\n    parser.add_argument('--save_dir', type=str, default='/dev_data/lz/tsm_ptms_anomaly_detection/result/')\n    parser.add_argument('--save_csv_name', type=str, default='timesnet_ucr_0727.csv')\n\n\n    args = parser.parse_args()\n    args.use_gpu = True if torch.cuda.is_available() and args.use_gpu else False\n\n    if args.use_gpu and args.use_multi_gpu:\n        args.devices = args.devices.replace(' ', '')\n        device_ids = args.devices.split(',')\n        args.device_ids = [int(id_) for id_ in device_ids]\n        args.gpu = args.device_ids[0]\n\n    print('Args in experiment:')\n    print(args)\n\n    Exp = Exp_Anomaly_Detection\n\n    train_loader, train_set = get_loader_segment(args.index, args.data_path + args.data, batch_size=args.batch_size,\n                                              win_size=args.win_size, mode='train', dataset=args.data)\n    val_loader, val_set = get_loader_segment(args.index, args.data_path + args.data, batch_size=args.batch_size,\n                                             win_size=args.win_size, mode='val', dataset=args.data)\n    test_loader, test_set = get_loader_segment(args.index, args.data_path + args.data, batch_size=args.batch_size,\n                                             win_size=args.win_size, mode='test', dataset=args.data)\n\n    
train_set = train_set.train\n    val_set = val_set.val\n    test_set = test_set.test\n\n    print(\"train_set.shape = \", train_set.shape, \", test_set.shape = \", test_set.shape, test_set.shape[-1])\n    args.enc_in = train_set.shape[-1]\n    args.c_out = train_set.shape[-1]\n\n    if args.is_training:\n        for ii in range(args.itr):\n            # setting record of experiments\n            setting = '{}_{}_{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_fc{}_eb{}_dt{}_{}_{}'.format(\n                args.task_name,\n                args.model_id,\n                args.model,\n                args.data,\n                args.features,\n                args.seq_len,\n                args.label_len,\n                args.pred_len,\n                args.d_model,\n                args.n_heads,\n                args.e_layers,\n                args.d_layers,\n                args.d_ff,\n                args.factor,\n                args.embed,\n                args.distil,\n                args.des, ii)\n\n            exp = Exp(args, train_set, train_loader, val_set, val_loader, test_set, test_loader)  # set experiments\n            print('>>>>>>>start training : {}>>>>>>>>>>>>>>>>>>>>>>>>>>'.format(setting))\n            exp.train(setting)\n\n            print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))\n            eval_res = exp.test(setting, dataset=args.data, ucr_index=args.index)\n            torch.cuda.empty_cache()\n\n            print(\"result_dict = \", eval_res)\n\n            eval_res['dataset'] = args.data + str(args.index)\n            import pandas as pd\n\n            # 转换字典为 DataFrame\n            df = pd.DataFrame([eval_res])\n            # 指定保存路径\n            save_path = args.save_dir + args.save_csv_name\n\n            # 转换字典为 DataFrame\n            df_new = pd.DataFrame([eval_res])\n\n            # 检查文件是否存在\n            if os.path.exists(save_path):\n                # 文件存在，读取现有数据\n                df_existing = 
pd.read_csv(save_path, index_col=0)\n                # 将新数据附加到现有数据框中\n                df_combined = pd.concat([df_existing, df_new], ignore_index=True)\n            else:\n                # 文件不存在，创建新的数据框\n                df_combined = df_new\n\n            # 保存 DataFrame 为 CSV 文件\n            df_combined.to_csv(save_path, index=True, index_label=\"id\")\n\n    else:\n        ii = 0\n        setting = '{}_{}_{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_fc{}_eb{}_dt{}_{}_{}'.format(\n            args.task_name,\n            args.model_id,\n            args.model,\n            args.data,\n            args.features,\n            args.seq_len,\n            args.label_len,\n            args.pred_len,\n            args.d_model,\n            args.n_heads,\n            args.e_layers,\n            args.d_layers,\n            args.d_ff,\n            args.factor,\n            args.embed,\n            args.distil,\n            args.des, ii)\n\n        exp = Exp(args)  # set experiments\n        print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))\n        exp.test(setting, test=1)\n        torch.cuda.empty_cache()\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/train_timesnet_uni.py",
    "content": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\nimport argparse\nimport os\nimport torch\nfrom other_anomaly_baselines.exp_anomaly_detection import Exp_Anomaly_Detection\nfrom other_anomaly_baselines.datasets.data_loader import get_loader_segment\nimport numpy as np\nimport random\nimport datautils\nfrom torch.utils.data import TensorDataset, DataLoader\n\n\nclass UniLoader(object):\n    def __init__(self, data_set, win_size, step, mode=\"train\"):\n        self.mode = mode\n        self.step = step\n        self.win_size = win_size\n\n        self.train = data_set\n\n\n    def __len__(self):\n        \"\"\"\n        Number of images in the object dataset.\n        \"\"\"\n\n        return (self.train.shape[0] - self.win_size) // self.step + 1\n\n\n    def __getitem__(self, index):\n        index = index * self.step\n\n        return np.float32(self.train[index:index + self.win_size])\n\n\nif __name__ == '__main__':\n    fix_seed = 42\n    random.seed(fix_seed)\n    torch.manual_seed(fix_seed)\n    np.random.seed(fix_seed)\n\n    parser = argparse.ArgumentParser(description='TimesNet')\n\n    # basic config\n    parser.add_argument('--task_name', type=str, default='anomaly_detection',\n                        help='task name, options:[long_term_forecast, short_term_forecast, imputation, classification, anomaly_detection]')\n    parser.add_argument('--is_training', type=int, default=1, help='status')\n    parser.add_argument('--model_id', type=str, default='test', help='model id')\n    parser.add_argument('--model', type=str, default='TimesNet',\n                        help='model name, options: [Autoformer, Transformer, TimesNet]')\n\n    # data loader\n    # parser.add_argument('--data', type=str, default='UCR', help='dataset type')   ##  SMD, MSL, SMAP, PSM, SWAT, NIPS_TS_Swan, UCR, NIPS_TS_Water\n    parser.add_argument('--data', type=str, 
default='kpi')  ##  kpi, yahoo\n    parser.add_argument('--root_path', type=str, default='./data/ETT/', help='root path of the data file')\n    # parser.add_argument('--data_path', type=str, default='ETTh1.csv', help='data file')\n    parser.add_argument('--features', type=str, default='M',\n                        help='forecasting task, options:[M, S, MS]; M:multivariate predict multivariate, S:univariate predict univariate, MS:multivariate predict univariate')\n    parser.add_argument('--target', type=str, default='OT', help='target feature in S or MS task')\n    parser.add_argument('--freq', type=str, default='h',\n                        help='freq for time features encoding, options:[s:secondly, t:minutely, h:hourly, d:daily, b:business days, w:weekly, m:monthly], you can also use more detailed freq like 15min or 3h')\n    parser.add_argument('--checkpoints', type=str, default='./checkpoints/', help='location of model checkpoints')\n\n    # forecasting task\n    parser.add_argument('--seq_len', type=int, default=100, help='input sequence length')\n    parser.add_argument('--label_len', type=int, default=48, help='start token length')\n    parser.add_argument('--pred_len', type=int, default=0, help='prediction sequence length')\n    parser.add_argument('--seasonal_patterns', type=str, default='Monthly', help='subset for M4')\n    parser.add_argument('--inverse', action='store_true', help='inverse output data', default=False)\n\n    # inputation task\n    parser.add_argument('--mask_rate', type=float, default=0.25, help='mask ratio')\n\n    # anomaly detection task\n    parser.add_argument('--anomaly_ratio', type=float, default=1, help='prior anomaly ratio (%)')\n\n    # model define\n    parser.add_argument('--top_k', type=int, default=3, help='for TimesBlock')\n    parser.add_argument('--num_kernels', type=int, default=6, help='for Inception')\n    parser.add_argument('--enc_in', type=int, default=1, help='encoder input size') ## 55 for MSL, 38 for SMD, SMAP 
for 25, PSM for 25, SWAT for 51, NIPS_TS_Swan for 38,\n\n    parser.add_argument('--dec_in', type=int, default=1, help='decoder input size')  ## NIPS_TS_Water for 38, UCR for 1\n    parser.add_argument('--c_out', type=int, default=1, help='output size')\n    parser.add_argument('--d_model', type=int, default=8, help='dimension of model')\n    parser.add_argument('--n_heads', type=int, default=8, help='num of heads')\n    parser.add_argument('--e_layers', type=int, default=1, help='num of encoder layers')\n    parser.add_argument('--d_layers', type=int, default=1, help='num of decoder layers')\n    parser.add_argument('--d_ff', type=int, default=16, help='dimension of fcn')\n    parser.add_argument('--moving_avg', type=int, default=25, help='window size of moving average')\n    parser.add_argument('--factor', type=int, default=1, help='attn factor')\n    parser.add_argument('--distil', action='store_false',\n                        help='whether to use distilling in encoder, using this argument means not using distilling',\n                        default=True)\n    parser.add_argument('--dropout', type=float, default=0.1, help='dropout')\n    parser.add_argument('--embed', type=str, default='timeF',\n                        help='time features encoding, options:[timeF, fixed, learned]')\n    parser.add_argument('--activation', type=str, default='gelu', help='activation')\n    parser.add_argument('--output_attention', action='store_true', help='whether to output attention in ecoder')\n\n    # optimization\n    parser.add_argument('--num_workers', type=int, default=10, help='data loader num workers')\n    parser.add_argument('--itr', type=int, default=1, help='experiments times')\n    parser.add_argument('--train_epochs', type=int, default=1, help='train epochs')\n    parser.add_argument('--batch_size', type=int, default=32, help='batch size of train input data')\n    parser.add_argument('--patience', type=int, default=3, help='early stopping patience')\n    
parser.add_argument('--learning_rate', type=float, default=0.0001, help='optimizer learning rate')\n    parser.add_argument('--des', type=str, default='test', help='exp description')\n    parser.add_argument('--loss', type=str, default='MSE', help='loss function')\n    parser.add_argument('--lradj', type=str, default='type1', help='adjust learning rate')\n    parser.add_argument('--use_amp', action='store_true', help='use automatic mixed precision training', default=False)\n\n    # GPU\n    parser.add_argument('--use_gpu', type=bool, default=True, help='use gpu')\n    parser.add_argument('--gpu', type=int, default=1, help='gpu')\n    parser.add_argument('--use_multi_gpu', action='store_true', help='use multiple gpus', default=False)\n    parser.add_argument('--devices', type=str, default='0,1,2,3', help='device ids of multile gpus')\n\n    # de-stationary projector params\n    parser.add_argument('--p_hidden_dims', type=int, nargs='+', default=[128, 128],\n                        help='hidden layer dimensions of projector (List)')\n    parser.add_argument('--p_hidden_layers', type=int, default=2, help='number of hidden layers in projector')\n\n    # Default\n    parser.add_argument('--index', type=int, default=137)\n    parser.add_argument('--data_path', type=str, default='datasets/')\n    parser.add_argument('--win_size', type=int, default=100)\n\n    parser.add_argument('--save_dir', type=str, default='/dev_data/lz/tsm_ptms_anomaly_detection/result/')\n    parser.add_argument('--save_csv_name', type=str, default='timesnet_uni_0722.csv')\n\n\n    args = parser.parse_args()\n    args.use_gpu = True if torch.cuda.is_available() and args.use_gpu else False\n\n    # 检查路径是否存在，如果不存在则赋值为新的路径\n    if not os.path.exists(args.save_dir):\n        args.save_dir = '/SSD/lz/tsm_ptms_anomaly_detection/result/'\n\n    print(\"save_dir = \", args.save_dir)  # 输出检查\n\n    if args.use_gpu and args.use_multi_gpu:\n        args.devices = args.devices.replace(' ', '')\n        
device_ids = args.devices.split(',')\n        args.device_ids = [int(id_) for id_ in device_ids]\n        args.gpu = args.device_ids[0]\n\n    print('Args in experiment:')\n    print(args)\n\n    Exp = Exp_Anomaly_Detection\n\n    # dataset = 'MSL'\n    # _train_loader, _train_set = get_loader_segment(args.index, args.data_path + dataset,\n    #                                                batch_size=args.batch_size,\n    #                                                win_size=args.win_size, mode='train', dataset=dataset)\n    #\n    # _train_set = _train_set.train\n    #\n    # print(\"_train_set.shape = \", _train_set.shape)\n\n    all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay = datautils.load_anomaly(\n        args.data)\n    train_data = datautils.gen_ano_train_data(all_train_data)\n\n    print(\"train_data.shape = \", train_data.shape)\n    _train_data = train_data[0]\n    print(\"000train_data.shape = \", train_data.shape, type(train_data))\n    _train_data = np.array(_train_data)\n    print(\"111_train_data.shape = \", _train_data.shape, type(_train_data))\n\n    train_dataset = UniLoader(_train_data, args.win_size, 1)\n\n    train_loader = DataLoader(dataset=train_dataset,\n                              batch_size=args.batch_size,\n                              shuffle=True,\n                              num_workers=2,\n                              drop_last=True)\n\n\n    val_loader = train_loader\n\n    args.input_c = train_data.shape[-1]\n    args.output_c = train_data.shape[-1]\n\n\n\n\n    # train_loader, train_set = get_loader_segment(args.index, args.data_path + args.data, batch_size=args.batch_size,\n    #                                           win_size=args.win_size, mode='train', dataset=args.data)\n    # val_loader, val_set = get_loader_segment(args.index, args.data_path + args.data, batch_size=args.batch_size,\n    #                                          
win_size=args.win_size, mode='val', dataset=args.data)\n    # test_loader, test_set = get_loader_segment(args.index, args.data_path + args.data, batch_size=args.batch_size,\n    #                                          win_size=args.win_size, mode='test', dataset='UCR')\n\n    # train_set = train_set.train\n    # val_set = val_set.val\n    # test_set = test_set.test\n\n    print(\"train_set.shape = \", _train_data.shape)\n    args.enc_in = _train_data.shape[-1]\n    args.c_out = _train_data.shape[-1]\n\n    if args.is_training:\n        for ii in range(args.itr):\n            # setting record of experiments\n            setting = '{}_{}_{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_fc{}_eb{}_dt{}_{}_{}'.format(\n                args.task_name,\n                args.model_id,\n                args.model,\n                args.data,\n                args.features,\n                args.seq_len,\n                args.label_len,\n                args.pred_len,\n                args.d_model,\n                args.n_heads,\n                args.e_layers,\n                args.d_layers,\n                args.d_ff,\n                args.factor,\n                args.embed,\n                args.distil,\n                args.des, ii)\n\n            exp = Exp(args, _train_data, train_loader, _train_data, train_loader, _train_data, train_loader)  # set experiments\n            print('>>>>>>>start training : {}>>>>>>>>>>>>>>>>>>>>>>>>>>'.format(setting))\n            exp.train_uni(setting)\n\n            print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))\n            eval_res = exp.test_uni(setting, all_train_data, all_test_data, all_test_labels, all_test_timestamps, delay, args)\n            torch.cuda.empty_cache()\n\n            print(\"result_dict = \", eval_res)\n\n            eval_res['dataset'] = args.data + str(args.index)\n            import pandas as pd\n\n            # 转换字典为 DataFrame\n            df = pd.DataFrame([eval_res])\n     
       # 指定保存路径\n            save_path = args.save_dir + args.save_csv_name\n\n            # 转换字典为 DataFrame\n            df_new = pd.DataFrame([eval_res])\n\n            # 检查文件是否存在\n            if os.path.exists(save_path):\n                # 文件存在，读取现有数据\n                df_existing = pd.read_csv(save_path, index_col=0)\n                # 将新数据附加到现有数据框中\n                df_combined = pd.concat([df_existing, df_new], ignore_index=True)\n            else:\n                # 文件不存在，创建新的数据框\n                df_combined = df_new\n\n            # 保存 DataFrame 为 CSV 文件\n            df_combined.to_csv(save_path, index=True, index_label=\"id\")\n\n    else:\n        ii = 0\n        setting = '{}_{}_{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_fc{}_eb{}_dt{}_{}_{}'.format(\n            args.task_name,\n            args.model_id,\n            args.model,\n            args.data,\n            args.features,\n            args.seq_len,\n            args.label_len,\n            args.pred_len,\n            args.d_model,\n            args.n_heads,\n            args.e_layers,\n            args.d_layers,\n            args.d_ff,\n            args.factor,\n            args.embed,\n            args.distil,\n            args.des, ii)\n\n        exp = Exp(args)  # set experiments\n        print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))\n        exp.test(setting, test=1)\n        torch.cuda.empty_cache()\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/train_ts2vec.py",
    "content": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\n\nimport torch\nimport numpy as np\nimport argparse\nimport os\nimport sys\nimport time\nimport datetime\nfrom ts2vec import TS2Vec\nfrom other_anomaly_baselines.tasks.anomaly_detection import eval_anomaly_detection, eval_anomaly_detection_coldstart\nimport datautils\nfrom utils import init_dl_program, name_with_datetime, pkl_save, data_dropout\n\ndef save_checkpoint_callback(\n    save_every=1,\n    unit='epoch'\n):\n    assert unit in ('epoch', 'iter')\n    def callback(model, loss):\n        n = model.n_epochs if unit == 'epoch' else model.n_iters\n        if n % save_every == 0:\n            model.save(f'{run_dir}/model_{n}.pkl')\n    return callback\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--dataset', default='kpi', help='The dataset name, yahoo, kpi')   ##  SMD, MSL, SMAP, PSM, SWAT, NIPS_TS_Swan, UCR, NIPS_TS_Water\n    parser.add_argument('--is_multi', default=False, help='The dataset name, yahoo, kpi')\n    parser.add_argument('--datapath', default='./datasets/', help='')\n    parser.add_argument('--index', type=int, default=143, help='')\n    parser.add_argument('--run_name', default='ts2Vec', help='The folder name used to save model, output and evaluation metrics. This can be set to any word')\n    parser.add_argument('--loader', type=str, default='anomaly', help='The data loader used to load the experimental data. This can be set to anomaly or anomaly_coldstart')\n    parser.add_argument('--gpu', type=int, default=0, help='The gpu no. 
used for training and inference (defaults to 0)')\n    parser.add_argument('--batch_size', type=int, default=8, help='The batch size (defaults to 8)')\n    parser.add_argument('--lr', type=float, default=0.001, help='The learning rate (defaults to 0.001)')\n    parser.add_argument('--repr-dims', type=int, default=320, help='The representation dimension (defaults to 320)')\n    parser.add_argument('--max-train-length', type=int, default=3000, help='For sequence with a length greater than <max_train_length>, it would be cropped into some sequences, each of which has a length less than <max_train_length> (defaults to 3000)')\n    parser.add_argument('--iters', type=int, default=None, help='The number of iterations')\n    parser.add_argument('--epochs', type=int, default=None, help='The number of epochs')\n    parser.add_argument('--save-every', type=int, default=None, help='Save the checkpoint every <save_every> iterations/epochs')\n    parser.add_argument('--seed', type=int, default=None, help='The random seed')\n    parser.add_argument('--max-threads', type=int, default=None, help='The maximum allowed number of threads used by this process')\n    parser.add_argument('--eval', default='True', help='Whether to perform evaluation after training')   ## action=\"store_true\"\n    parser.add_argument('--irregular', type=float, default=0, help='The ratio of missing observations (defaults to 0)')\n\n    parser.add_argument('--save_dir', type=str, default='/dev_data/lz/tsm_ptms_anomaly_detection/result/')\n    parser.add_argument('--save_csv_name', type=str, default='ts2vec_uni_0723.csv')\n\n    args = parser.parse_args()\n    \n    print(\"Dataset:\", args.dataset)\n    print(\"Arguments:\", str(args))\n    \n    device = init_dl_program(args.gpu, seed=args.seed, max_threads=args.max_threads)\n    \n    print('Loading data... 
', end='')\n    if args.loader == 'anomaly':\n        task_type = 'anomaly_detection'\n        if args.is_multi:\n            from datasets.data_loader import get_loader_segment\n\n            data_path = args.datapath + args.dataset + '/'\n            print(\"data_path = \", data_path)\n            _, train_data_loader = get_loader_segment(args.index, data_path, args.batch_size, win_size=100, step=100, mode='train',\n                                             dataset=args.dataset)\n            # val_data_loader = get_loader_segment(args.index, data_path, args.batch_size, win_size=100, step=100, mode='val',\n            #                                  dataset=args.dataset)\n            # test_data_loader = get_loader_segment(args.index, data_path, args.batch_size, win_size=100, step=100, mode='test',\n            #                                  dataset=args.dataset)\n\n            all_train_data = train_data_loader.train\n            all_train_labels = None\n            all_train_timestamps = None\n            all_test_data = train_data_loader.test\n            all_test_labels = train_data_loader.test_labels\n            all_test_timestamps = None\n            delay = 5\n\n\n\n            print(\"all_train_data test_data, test_labels.shape = \", all_train_data.shape, all_test_data.shape, all_test_labels.shape)\n            train_data = np.expand_dims(all_train_data, axis=0)\n            print(\"train_data.shape = \", train_data.shape)\n            print(\"Read Success!!!\")\n        else:\n            all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay = datautils.load_anomaly(args.dataset)\n            train_data = datautils.gen_ano_train_data(all_train_data)\n        \n    elif args.loader == 'anomaly_coldstart':\n        task_type = 'anomaly_detection_coldstart'\n        all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay = 
datautils.load_anomaly(args.dataset)\n        train_data, _, _, _ = datautils.load_UCR('FordA')\n        \n    else:\n        raise ValueError(f\"Unknown loader {args.loader}.\")\n        \n        \n    if args.irregular > 0:\n        raise ValueError(f\"Task type {task_type} is not supported when irregular>0.\")\n    print('done')\n    \n    config = dict(\n        batch_size=args.batch_size,\n        lr=args.lr,\n        output_dims=args.repr_dims,\n        max_train_length=args.max_train_length\n    )\n    \n    if args.save_every is not None:\n        unit = 'epoch' if args.epochs is not None else 'iter'\n        config[f'after_{unit}_callback'] = save_checkpoint_callback(args.save_every, unit)\n\n    run_dir = 'training/' + args.dataset + '__' + name_with_datetime(args.run_name)\n    os.makedirs(run_dir, exist_ok=True)\n    \n    t = time.time()\n    \n    model = TS2Vec(\n        input_dims=train_data.shape[-1],\n        device=device,\n        **config\n    )\n    loss_log = model.fit(\n        train_data,\n        n_epochs=args.epochs,\n        n_iters=args.iters,\n        verbose=True\n    )\n    model.save(f'{run_dir}/model.pkl')\n\n    t = time.time() - t\n    print(f\"\\nTraining time: {datetime.timedelta(seconds=t)}\")\n    print(\"Training time(seconds): \", t)\n\n    if args.eval:\n        if task_type == 'anomaly_detection':\n            out, eval_res = eval_anomaly_detection(model, all_train_data, all_train_labels, all_train_timestamps, all_test_data,\n                                                   all_test_labels, all_test_timestamps, delay, is_multi=args.is_multi)\n        elif task_type == 'anomaly_detection_coldstart':\n            out, eval_res = eval_anomaly_detection_coldstart(model, all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay)\n        else:\n            assert False\n        pkl_save(f'{run_dir}/out.pkl', out)\n        pkl_save(f'{run_dir}/eval_res.pkl', 
eval_res)\n        print('Evaluation result:', eval_res)\n\n\n        eval_res['dataset'] = args.dataset + str(args.index)\n        import pandas as pd\n\n        # 转换字典为 DataFrame\n        df = pd.DataFrame([eval_res])\n        # 指定保存路径\n        save_path = args.save_dir + args.save_csv_name\n\n        # 转换字典为 DataFrame\n        df_new = pd.DataFrame([eval_res])\n\n        # 检查文件是否存在\n        if os.path.exists(save_path):\n            # 文件存在，读取现有数据\n            df_existing = pd.read_csv(save_path, index_col=0)\n            # 将新数据附加到现有数据框中\n            df_combined = pd.concat([df_existing, df_new], ignore_index=True)\n        else:\n            # 文件不存在，创建新的数据框\n            df_combined = df_new\n\n        # 保存 DataFrame 为 CSV 文件\n        df_combined.to_csv(save_path, index=True, index_label=\"id\")\n\n\n\n    print(\"Finished.\")\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/train_ts2vec_multi.py",
    "content": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\nimport torch\nimport numpy as np\nimport argparse\nimport os\nimport sys\nimport time\nimport datetime\nfrom ts2vec import TS2Vec\nfrom other_anomaly_baselines.tasks.anomaly_detection import eval_anomaly_detection, eval_anomaly_detection_coldstart\nimport datautils\nfrom utils import init_dl_program, name_with_datetime, pkl_save, data_dropout\n\n\ndef save_checkpoint_callback(\n        save_every=1,\n        unit='epoch'\n):\n    assert unit in ('epoch', 'iter')\n\n    def callback(model, loss):\n        n = model.n_epochs if unit == 'epoch' else model.n_iters\n        if n % save_every == 0:\n            model.save(f'{run_dir}/model_{n}.pkl')\n\n    return callback\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--dataset', default='UCR',\n                        help='The dataset name, yahoo, kpi')  ##  SMD, MSL, SMAP, PSM, SWAT, NIPS_TS_Swan, UCR, NIPS_TS_Water\n    parser.add_argument('--is_multi', default=True, help='The dataset name, yahoo, kpi')\n    parser.add_argument('--datapath', default='./datasets/', help='')\n    parser.add_argument('--index', type=int, default=203, help='') ## [79, 108, 187, 203]\n    parser.add_argument('--run_name', default='ts2Vec',\n                        help='The folder name used to save model, output and evaluation metrics. This can be set to any word')\n    parser.add_argument('--loader', type=str, default='anomaly',\n                        help='The data loader used to load the experimental data. This can be set to anomaly or anomaly_coldstart')\n    parser.add_argument('--gpu', type=int, default=0,\n                        help='The gpu no. 
used for training and inference (defaults to 0)')\n    parser.add_argument('--batch_size', type=int, default=8, help='The batch size (defaults to 8)')\n    parser.add_argument('--lr', type=float, default=0.001, help='The learning rate (defaults to 0.001)')\n    parser.add_argument('--repr-dims', type=int, default=320, help='The representation dimension (defaults to 320)')\n    parser.add_argument('--max-train-length', type=int, default=3000,\n                        help='For sequence with a length greater than <max_train_length>, it would be cropped into some sequences, each of which has a length less than <max_train_length> (defaults to 3000)')\n    parser.add_argument('--iters', type=int, default=None, help='The number of iterations')\n    parser.add_argument('--epochs', type=int, default=None, help='The number of epochs')\n    parser.add_argument('--save-every', type=int, default=None,\n                        help='Save the checkpoint every <save_every> iterations/epochs')\n    parser.add_argument('--seed', type=int, default=None, help='The random seed')\n    parser.add_argument('--max-threads', type=int, default=None,\n                        help='The maximum allowed number of threads used by this process')\n    parser.add_argument('--eval', default='True',\n                        help='Whether to perform evaluation after training')  ## action=\"store_true\"\n    parser.add_argument('--irregular', type=float, default=0, help='The ratio of missing observations (defaults to 0)')\n\n    parser.add_argument('--save_dir', type=str, default='/dev_data/lz/tsm_ptms_anomaly_detection/result/')\n    parser.add_argument('--save_csv_name', type=str, default='ts2vec_ucr_0727.csv')\n\n    args = parser.parse_args()\n\n    print(\"Dataset:\", args.dataset)\n    print(\"Arguments:\", str(args))\n\n    device = init_dl_program(args.gpu, seed=args.seed, max_threads=args.max_threads)\n\n    print('Loading data... 
', end='')\n    if args.loader == 'anomaly':\n        task_type = 'anomaly_detection'\n        if args.is_multi:\n            from datasets.data_loader import get_loader_segment\n\n            data_path = args.datapath + args.dataset + '/'\n            print(\"data_path = \", data_path)\n            _, train_data_loader = get_loader_segment(args.index, data_path, args.batch_size, win_size=100, step=100,\n                                                      mode='train',\n                                                      dataset=args.dataset)\n            # val_data_loader = get_loader_segment(args.index, data_path, args.batch_size, win_size=100, step=100, mode='val',\n            #                                  dataset=args.dataset)\n            # test_data_loader = get_loader_segment(args.index, data_path, args.batch_size, win_size=100, step=100, mode='test',\n            #                                  dataset=args.dataset)\n\n            all_train_data = train_data_loader.train\n            all_train_labels = None\n            all_train_timestamps = None\n            all_test_data = train_data_loader.test\n            all_test_labels = train_data_loader.test_labels\n            all_test_timestamps = None\n            delay = 5\n\n            print(\"all_train_data test_data, test_labels.shape = \", all_train_data.shape, all_test_data.shape,\n                  all_test_labels.shape)\n            train_data = np.expand_dims(all_train_data, axis=0)\n            print(\"train_data.shape = \", train_data.shape)\n            print(\"Read Success!!!\")\n        else:\n            all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay = datautils.load_anomaly(\n                args.dataset)\n            train_data = datautils.gen_ano_train_data(all_train_data)\n\n    elif args.loader == 'anomaly_coldstart':\n        task_type = 'anomaly_detection_coldstart'\n        all_train_data, all_train_labels, 
all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay = datautils.load_anomaly(\n            args.dataset)\n        train_data, _, _, _ = datautils.load_UCR('FordA')\n\n    else:\n        raise ValueError(f\"Unknown loader {args.loader}.\")\n\n    if args.irregular > 0:\n        raise ValueError(f\"Task type {task_type} is not supported when irregular>0.\")\n    print('done')\n\n    config = dict(\n        batch_size=args.batch_size,\n        lr=args.lr,\n        output_dims=args.repr_dims,\n        max_train_length=args.max_train_length\n    )\n\n    if args.save_every is not None:\n        unit = 'epoch' if args.epochs is not None else 'iter'\n        config[f'after_{unit}_callback'] = save_checkpoint_callback(args.save_every, unit)\n\n    run_dir = 'training/' + args.dataset + '__' + name_with_datetime(args.run_name)\n    os.makedirs(run_dir, exist_ok=True)\n\n    t = time.time()\n\n    model = TS2Vec(\n        input_dims=train_data.shape[-1],\n        device=device,\n        **config\n    )\n    loss_log = model.fit(\n        train_data,\n        n_epochs=args.epochs,\n        n_iters=args.iters,\n        verbose=True\n    )\n    model.save(f'{run_dir}/model.pkl')\n\n    t = time.time() - t\n    print(f\"\\nTraining time: {datetime.timedelta(seconds=t)}\")\n    print(\"Training time(seconds): \", t)\n\n    if args.eval:\n        if task_type == 'anomaly_detection':\n            out, eval_res = eval_anomaly_detection(model, all_train_data, all_train_labels, all_train_timestamps,\n                                                   all_test_data,\n                                                   all_test_labels, all_test_timestamps, delay, is_multi=args.is_multi, ucr_index=args.index)\n        elif task_type == 'anomaly_detection_coldstart':\n            out, eval_res = eval_anomaly_detection_coldstart(model, all_train_data, all_train_labels,\n                                                             all_train_timestamps, 
all_test_data, all_test_labels,\n                                                             all_test_timestamps, delay)\n        else:\n            assert False\n        pkl_save(f'{run_dir}/out.pkl', out)\n        pkl_save(f'{run_dir}/eval_res.pkl', eval_res)\n        print('Evaluation result:', eval_res)\n\n        eval_res['dataset'] = args.dataset + str(args.index)\n        import pandas as pd\n\n        # 转换字典为 DataFrame\n        df = pd.DataFrame([eval_res])\n        # 指定保存路径\n        save_path = args.save_dir + args.save_csv_name\n\n        # 转换字典为 DataFrame\n        df_new = pd.DataFrame([eval_res])\n\n        # 检查文件是否存在\n        if os.path.exists(save_path):\n            # 文件存在，读取现有数据\n            df_existing = pd.read_csv(save_path, index_col=0)\n            # 将新数据附加到现有数据框中\n            df_combined = pd.concat([df_existing, df_new], ignore_index=True)\n        else:\n            # 文件不存在，创建新的数据框\n            df_combined = df_new\n\n        # 保存 DataFrame 为 CSV 文件\n        df_combined.to_csv(save_path, index=True, index_label=\"id\")\n\n    print(\"Finished.\")\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/ts2vec.py",
    "content": "import torch\nimport torch.nn.functional as F\nfrom torch.utils.data import TensorDataset, DataLoader\nimport numpy as np\nfrom models import TSEncoder\nfrom models.losses import hierarchical_contrastive_loss\nfrom utils import take_per_row, split_with_nan, centerize_vary_length_series, torch_pad_nan\nimport math\n\nclass TS2Vec:\n    '''The TS2Vec model'''\n    \n    def __init__(\n        self,\n        input_dims,\n        output_dims=320,\n        hidden_dims=64,\n        depth=10,\n        device='cuda',\n        lr=0.001,\n        batch_size=16,\n        max_train_length=None,\n        temporal_unit=0,\n        after_iter_callback=None,\n        after_epoch_callback=None\n    ):\n        ''' Initialize a TS2Vec model.\n        \n        Args:\n            input_dims (int): The input dimension. For a univariate time series, this should be set to 1.\n            output_dims (int): The representation dimension.\n            hidden_dims (int): The hidden dimension of the encoder.\n            depth (int): The number of hidden residual blocks in the encoder.\n            device (int): The gpu used for training and inference.\n            lr (int): The learning rate.\n            batch_size (int): The batch size.\n            max_train_length (Union[int, NoneType]): The maximum allowed sequence length for training. For sequence with a length greater than <max_train_length>, it would be cropped into some sequences, each of which has a length less than <max_train_length>.\n            temporal_unit (int): The minimum unit to perform temporal contrast. 
When training on a very long sequence, this param helps to reduce the cost of time and memory.\n            after_iter_callback (Union[Callable, NoneType]): A callback function that would be called after each iteration.\n            after_epoch_callback (Union[Callable, NoneType]): A callback function that would be called after each epoch.\n        '''\n        \n        super().__init__()\n        self.device = device\n        self.lr = lr\n        self.batch_size = batch_size\n        self.max_train_length = max_train_length\n        self.temporal_unit = temporal_unit\n        \n        self._net = TSEncoder(input_dims=input_dims, output_dims=output_dims, hidden_dims=hidden_dims, depth=depth).to(self.device)\n        self.net = torch.optim.swa_utils.AveragedModel(self._net)\n        self.net.update_parameters(self._net)\n        \n        self.after_iter_callback = after_iter_callback\n        self.after_epoch_callback = after_epoch_callback\n        \n        self.n_epochs = 0\n        self.n_iters = 0\n    \n    def fit(self, train_data, n_epochs=None, n_iters=None, verbose=False):\n        ''' Training the TS2Vec model.\n        \n        Args:\n            train_data (numpy.ndarray): The training data. It should have a shape of (n_instance, n_timestamps, n_features). All missing data should be set to NaN.\n            n_epochs (Union[int, NoneType]): The number of epochs. When this reaches, the training stops.\n            n_iters (Union[int, NoneType]): The number of iterations. When this reaches, the training stops. 
If both n_epochs and n_iters are not specified, a default setting would be used that sets n_iters to 200 for a dataset with size <= 100000, 600 otherwise.\n            verbose (bool): Whether to print the training loss after each epoch.\n            \n        Returns:\n            loss_log: a list containing the training losses on each epoch.\n        '''\n        assert train_data.ndim == 3\n        \n        if n_iters is None and n_epochs is None:\n            n_iters = 200 if train_data.size <= 100000 else 600  # default param for n_iters    ###   n_iters = 200 if train_data.size <= 100000 else 600  # default param for n_iters\n        \n        if self.max_train_length is not None:\n            sections = train_data.shape[1] // self.max_train_length\n            if sections >= 2:\n                train_data = np.concatenate(split_with_nan(train_data, sections, axis=1), axis=0)\n                # train_data: (n_instance*sections, max_train_length, n_features)\n\n        temporal_missing = np.isnan(train_data).all(axis=-1).any(axis=0) # (max_train_length)\n        if temporal_missing[0] or temporal_missing[-1]: # whether the head or tail exists nan\n            train_data = centerize_vary_length_series(train_data)\n                \n        train_data = train_data[~np.isnan(train_data).all(axis=2).all(axis=1)] \n        # delete the sequence (max_train_length, n_features) contains only nan\n        \n        train_dataset = TensorDataset(torch.from_numpy(train_data).to(torch.float))\n        train_loader = DataLoader(train_dataset, batch_size=min(self.batch_size, len(train_dataset)), shuffle=True, drop_last=True)\n        \n        optimizer = torch.optim.AdamW(self._net.parameters(), lr=self.lr)\n        \n        loss_log = []\n        \n        while True:\n            if n_epochs is not None and self.n_epochs >= n_epochs:\n                break\n            \n            cum_loss = 0\n            n_epoch_iters = 0\n            \n            interrupted = 
False\n            for batch in train_loader:\n                if n_iters is not None and self.n_iters >= n_iters:\n                    interrupted = True\n                    break\n                \n                x = batch[0]  #(batch_size, n_timestamps, n_features)\n                # print(\"#####################\")\n                # raise Exception('my personal exception!')\n\n                if self.max_train_length is not None and x.size(1) > self.max_train_length:\n                    window_offset = np.random.randint(x.size(1) - self.max_train_length + 1)\n                    x = x[:, window_offset : window_offset + self.max_train_length]\n                x = x.to(self.device)\n                \n                ts_l = x.size(1)\n                crop_l = np.random.randint(low=2 ** (self.temporal_unit + 1), high=ts_l+1)\n                crop_left = np.random.randint(ts_l - crop_l + 1)\n                crop_right = crop_left + crop_l\n                crop_eleft = np.random.randint(crop_left + 1)\n                crop_eright = np.random.randint(low=crop_right, high=ts_l + 1)\n                crop_offset = np.random.randint(low=-crop_eleft, high=ts_l - crop_eright + 1, size=x.size(0))\n                \n                optimizer.zero_grad()\n                \n                out1 = self._net(take_per_row(x, crop_offset + crop_eleft, crop_right - crop_eleft)) \n                out1 = out1[:, -crop_l:]\n                \n                out2 = self._net(take_per_row(x, crop_offset + crop_left, crop_eright - crop_left))\n                out2 = out2[:, :crop_l]\n                \n                loss = hierarchical_contrastive_loss(\n                    out1,\n                    out2,\n                    temporal_unit=self.temporal_unit\n                )\n                \n                loss.backward()\n                optimizer.step()\n                self.net.update_parameters(self._net)\n                    \n                cum_loss += loss.item()\n      
          n_epoch_iters += 1\n                \n                self.n_iters += 1\n                \n                if self.after_iter_callback is not None:\n                    self.after_iter_callback(self, loss.item())\n            \n            if interrupted:\n                break\n            \n            cum_loss /= n_epoch_iters\n            loss_log.append(cum_loss)\n            if verbose:\n                print(f\"Epoch #{self.n_epochs}: loss={cum_loss}\")\n            self.n_epochs += 1\n            \n            if self.after_epoch_callback is not None:\n                self.after_epoch_callback(self, cum_loss)\n            \n        return loss_log\n    \n    def _eval_with_pooling(self, x, mask=None, slicing=None, encoding_window=None):\n        out = self.net(x.to(self.device, non_blocking=True), mask)\n        if encoding_window == 'full_series':\n            if slicing is not None:\n                out = out[:, slicing]\n            out = F.max_pool1d(\n                out.transpose(1, 2),\n                kernel_size = out.size(1),\n            ).transpose(1, 2)\n            \n        elif isinstance(encoding_window, int):\n            out = F.max_pool1d(\n                out.transpose(1, 2),\n                kernel_size = encoding_window,\n                stride = 1,\n                padding = encoding_window // 2\n            ).transpose(1, 2)\n            if encoding_window % 2 == 0:\n                out = out[:, :-1]\n            if slicing is not None:\n                out = out[:, slicing]\n            \n        elif encoding_window == 'multiscale':\n            p = 0\n            reprs = []\n            while (1 << p) + 1 < out.size(1):\n                t_out = F.max_pool1d(\n                    out.transpose(1, 2),\n                    kernel_size = (1 << (p + 1)) + 1,\n                    stride = 1,\n                    padding = 1 << p\n                ).transpose(1, 2)\n                if slicing is not None:\n                    
t_out = t_out[:, slicing]\n                reprs.append(t_out)\n                p += 1\n            out = torch.cat(reprs, dim=-1)\n            \n        else:\n            if slicing is not None:\n                out = out[:, slicing]\n            \n        return out.cpu()\n    \n    def encode(self, data, mask=None, encoding_window=None, casual=False, sliding_length=None, sliding_padding=0, batch_size=None):\n        ''' Compute representations using the model.\n        \n        Args:\n            data (numpy.ndarray): This should have a shape of (n_instance, n_timestamps, n_features). All missing data should be set to NaN.\n            mask (str): The mask used by encoder can be specified with this parameter. This can be set to 'binomial', 'continuous', 'all_true', 'all_false' or 'mask_last'.\n            encoding_window (Union[str, int]): When this param is specified, the computed representation would the max pooling over this window. This can be set to 'full_series', 'multiscale' or an integer specifying the pooling kernel size.\n            casual (bool): When this param is set to True, the future informations would not be encoded into representation of each timestamp.\n            sliding_length (Union[int, NoneType]): The length of sliding window. When this param is specified, a sliding inference would be applied on the time series.\n            sliding_padding (int): This param specifies the contextual data length used for inference every sliding windows.\n            batch_size (Union[int, NoneType]): The batch size used for inference. 
If not specified, this would be the same batch size as training.\n            \n        Returns:\n            repr: The representations for data.\n        '''\n        assert self.net is not None, 'please train or load a net first'\n        assert data.ndim == 3\n\n        print(\"data.shape = \", data.shape)\n        if batch_size is None:\n            batch_size = self.batch_size\n        n_samples, ts_l, _ = data.shape\n\n        org_training = self.net.training\n        self.net.eval()\n        \n        dataset = TensorDataset(torch.from_numpy(data).to(torch.float))\n        loader = DataLoader(dataset, batch_size=batch_size)\n        \n        with torch.no_grad():\n            output = []\n            for batch in loader:\n                x = batch[0]\n                if sliding_length is not None:\n                    reprs = []\n                    if n_samples < batch_size:\n                        calc_buffer = []\n                        calc_buffer_l = 0\n                    for i in range(0, ts_l, sliding_length):\n                        l = i - sliding_padding\n                        r = i + sliding_length + (sliding_padding if not casual else 0)\n                        x_sliding = torch_pad_nan(\n                            x[:, max(l, 0) : min(r, ts_l)],\n                            left=-l if l<0 else 0,\n                            right=r-ts_l if r>ts_l else 0,\n                            dim=1\n                        )\n                        if n_samples < batch_size:\n                            if calc_buffer_l + n_samples > batch_size:\n                                out = self._eval_with_pooling(\n                                    torch.cat(calc_buffer, dim=0),\n                                    mask,\n                                    slicing=slice(sliding_padding, sliding_padding+sliding_length),\n                                    encoding_window=encoding_window\n                                )\n                          
      reprs += torch.split(out, n_samples)\n                                calc_buffer = []\n                                calc_buffer_l = 0\n                            calc_buffer.append(x_sliding)\n                            calc_buffer_l += n_samples\n                        else:\n                            out = self._eval_with_pooling(\n                                x_sliding,\n                                mask,\n                                slicing=slice(sliding_padding, sliding_padding+sliding_length),\n                                encoding_window=encoding_window\n                            )\n                            reprs.append(out)\n\n                    if n_samples < batch_size:\n                        if calc_buffer_l > 0:\n                            out = self._eval_with_pooling(\n                                torch.cat(calc_buffer, dim=0),\n                                mask,\n                                slicing=slice(sliding_padding, sliding_padding+sliding_length),\n                                encoding_window=encoding_window\n                            )\n                            reprs += torch.split(out, n_samples)\n                            calc_buffer = []\n                            calc_buffer_l = 0\n                    \n                    out = torch.cat(reprs, dim=1)\n                    if encoding_window == 'full_series':\n                        out = F.max_pool1d(\n                            out.transpose(1, 2).contiguous(),\n                            kernel_size = out.size(1),\n                        ).squeeze(1)\n                else:\n                    out = self._eval_with_pooling(x, mask, encoding_window=encoding_window)\n                    if encoding_window == 'full_series':\n                        out = out.squeeze(1)\n                        \n                output.append(out)\n                \n            output = torch.cat(output, dim=0)\n            \n        
self.net.train(org_training)\n        return output.numpy()\n    \n    def save(self, fn):\n        ''' Save the model to a file.\n        \n        Args:\n            fn (str): filename.\n        '''\n        torch.save(self.net.state_dict(), fn)\n    \n    def load(self, fn):\n        ''' Load the model from a file.\n        \n        Args:\n            fn (str): filename.\n        '''\n        state_dict = torch.load(fn, map_location=self.device)\n        self.net.load_state_dict(state_dict)\n    \n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/utils.py",
    "content": "import os\nimport numpy as np\nimport pickle\nimport torch\nimport random\nfrom datetime import datetime\n\ndef pkl_save(name, var):\n    with open(name, 'wb') as f:\n        pickle.dump(var, f)\n\ndef pkl_load(name):\n    with open(name, 'rb') as f:\n        return pickle.load(f)\n    \ndef torch_pad_nan(arr, left=0, right=0, dim=0):\n    if left > 0:\n        padshape = list(arr.shape)\n        padshape[dim] = left\n        arr = torch.cat((torch.full(padshape, np.nan), arr), dim=dim)\n    if right > 0:\n        padshape = list(arr.shape)\n        padshape[dim] = right\n        arr = torch.cat((arr, torch.full(padshape, np.nan)), dim=dim)\n    return arr\n\n### pad the 'nan' for the sequence to get teh target_length\n# both_side=True: padding at both the head and the tail with half pad_size\n# both_side=False: padding at the tail with whole pad_size    \ndef pad_nan_to_target(array, target_length, axis=0, both_side=False):\n    assert array.dtype in [np.float16, np.float32, np.float64]\n    pad_size = target_length - array.shape[axis]\n    if pad_size <= 0:\n        return array\n    npad = [(0, 0)] * array.ndim # 1\n    if both_side:\n        npad[axis] = (pad_size // 2, pad_size - pad_size//2)\n    else:\n        npad[axis] = (0, pad_size)\n    return np.pad(array, pad_width=npad, mode='constant', constant_values=np.nan)\n\n### split the sequence into some (sections) subsequence,\n### and padding at the tail to have same length (max_train_length)\ndef split_with_nan(x, sections, axis=0):\n    assert x.dtype in [np.float16, np.float32, np.float64]\n    arrs = np.array_split(x, sections, axis=axis)\n    target_length = arrs[0].shape[axis]\n    for i in range(len(arrs)):\n        arrs[i] = pad_nan_to_target(arrs[i], target_length, axis=axis)\n    return arrs\n\ndef take_per_row(A, indx, num_elem):\n    all_indx = indx[:,None] + np.arange(num_elem)\n    return A[torch.arange(all_indx.shape[0])[:,None], all_indx]\n\ndef 
centerize_vary_length_series(x):\n    prefix_zeros = np.argmax(~np.isnan(x).all(axis=-1), axis=1)\n    suffix_zeros = np.argmax(~np.isnan(x[:, ::-1]).all(axis=-1), axis=1)\n    offset = (prefix_zeros + suffix_zeros) // 2 - prefix_zeros\n    rows, column_indices = np.ogrid[:x.shape[0], :x.shape[1]]\n    offset[offset < 0] += x.shape[1]\n    column_indices = column_indices - offset[:, np.newaxis]\n    return x[rows, column_indices]\n\ndef data_dropout(arr, p):\n    B, T = arr.shape[0], arr.shape[1]\n    mask = np.full(B*T, False, dtype=np.bool)\n    ele_sel = np.random.choice(\n        B*T,\n        size=int(B*T*p),\n        replace=False\n    )\n    mask[ele_sel] = True\n    res = arr.copy()\n    res[mask.reshape(B, T)] = np.nan\n    return res\n\ndef name_with_datetime(prefix='default'):\n    now = datetime.now()\n    return prefix + '_' + now.strftime(\"%Y%m%d_%H%M%S\")\n\ndef init_dl_program(\n    device_name,\n    seed=None,\n    use_cudnn=True,\n    deterministic=False,\n    benchmark=False,\n    use_tf32=False,\n    max_threads=None\n):\n    import torch\n    if max_threads is not None:\n        torch.set_num_threads(max_threads)  # intraop\n        if torch.get_num_interop_threads() != max_threads:\n            torch.set_num_interop_threads(max_threads)  # interop\n        try:\n            import mkl\n        except:\n            pass\n        else:\n            mkl.set_num_threads(max_threads)\n        \n    if seed is not None:\n        random.seed(seed)\n        np.random.seed(seed)\n        torch.manual_seed(seed)\n        \n    if isinstance(device_name, (str, int)):\n        device_name = [device_name]\n    \n    devices = []\n    for t in reversed(device_name):\n        t_device = torch.device(t)\n        devices.append(t_device)\n        if t_device.type == 'cuda':\n            assert torch.cuda.is_available()\n            torch.cuda.set_device(t_device)\n            if seed is not None:\n                torch.cuda.manual_seed(seed)\n                
torch.cuda.manual_seed_all(seed)\n\n    devices.reverse()\n    torch.backends.cudnn.enabled = use_cudnn\n    torch.backends.cudnn.deterministic = deterministic\n    torch.backends.cudnn.benchmark = benchmark\n    \n    if hasattr(torch.backends.cudnn, 'allow_tf32'):\n        torch.backends.cudnn.allow_tf32 = use_tf32\n        torch.backends.cuda.matmul.allow_tf32 = use_tf32\n        \n    return devices if len(devices) > 1 else devices[0]\n\n\ndef split_N_pad(series,window_size):\n    assert len(series.shape)==2\n    ret=[]\n    l=series.shape[0]\n    for i in range(l//window_size):\n        ret.append(series[i*window_size:(i+1)*window_size,:])\n    left = l-l//window_size*window_size\n    '''TODO:pad'''\n    if left!=0:\n        p = np.zeros([window_size,series.shape[1]])\n        p[:left,:]=series[-left:,:]\n        ret.append(p)\n    return ret\n\n\n'''for AT'''\ndef data_slice(data,window_size):\n    '''\n    data : [size,length,dim]\n    '''\n    assert len(data.shape)==3\n    ret=[]\n    for i in range(data.shape[0]):\n        series = data[i]\n        ret.extend(split_N_pad(series,window_size))\n    return np.array(ret)"
  },
  {
    "path": "ts_classification_methods/.gitignore",
    "content": "*.log\n\ndilated_result\nfcn_result\nfcn_result_v2\nresult_v2\nrnn_result\n__pychache__\ndata/__pychache__\nlogs_v2\nlogs_v3\nlogs\nresult_v3\ncache\n/.idea/\n/test/test_env.py\n/test/test_path.py\n/test/train_test.py\n/ts2vec_cls/train_nonlin.py\n/tloss_cls/*.csv\n/selftime_cls/*.csv\n/tst_cls/*.csv\n/tst_cls/results\n/scripts/generator_uea.py\n/test/fcn_uea.py\n/test/dilated_uea.py\n/visualize_test.py\n/test/Wine/\n/test/Wine/test_dir\n/scripts/ex1_trasfer_finetune.sh\n/test/train_test2.py\n/ts2vec_cls/train_tsm_test.py\n/test/train_test3.py\n/tstcc_cls/semi_main_ucr.py\n/tfc_cls/new_dataset_test.py\n/tfc_cls/result_transfer/readme\n/result_tsm_lin/test_readme\n/result_tsm_lin/test_readme\n/result_tsm_lin/\n/tfc_cls/\n"
  },
  {
    "path": "ts_classification_methods/README.md",
    "content": "# A Survey on Time-Series Pre-Trained Models\n\nThis is the training code for our paper *\"A Survey on Time-Series Pre-Trained Models\"*\n\n\n## Pre-Trained Models on Time Series Classification\n### Usage (Transfer Learning)\n1. To pre-train a model on your own dataset, run\n\n```bash\npython train.py --dataroot [your UCR datasets directory] --task [type of pre-training task: classification or reconstruction] --dataset [name of the dataset you want to pretrain on] --backbone [fcn or dilated] --mode pretrain ...\n```\n\n2. To finetune (classification) the model on a dataset, run\n\n```bash\npython train.py --dataroot [your UCR datasets directory] --dataset [name of the dataset you want to finetune on] --source_dataset [the dataset you pretrained on] --save_dir [the directory to save the pretrained weights] --mode finetune ...\n\n```\n\nrun \n```bash \npython train.py -h\n```\n\nFor detailed options and examples, please refer to ```scripts/transfer_pretrain_finetune.sh```\n\n### Usage (Transformer and Contrastive Learning)\n| ID  | Method  | Architecture                       | Year | Press.            
| Source Code |\n|-----| ----  |------------------------------------|------|-------------------| ---- | \n| 1   | [TS2Vec](https://www.aaai.org/AAAI22Papers/AAAI-8809.YueZ.pdf) | Contrastive Learning               | 2022 | AAAI              | [github-link](https://github.com/yuezhihan/ts2vec) |\n| 2   | [TS-TCC](https://www.ijcai.org/proceedings/2021/0324.pdf) | Contrastive Learning & Transformer | 2021 | IJCAI             | [github-link](https://github.com/emadeldeen24/TS-TCC) |\n| 3   | [TST](https://dl.acm.org/doi/10.1145/3447548.3467401) | Transformer                        | 2021 | KDD               | [github-link](https://github.com/gzerveas/mvts_transformer) |\n| 4   | [Triplet-loss](https://papers.nips.cc/paper/2019/hash/53c6de78244e9f528eb3e1cda69699bb-Abstract.html) | Contrastive Learning               | 2019 | NeurIPS           | [github-link](https://github.com/White-Link/UnsupervisedScalableRepresentationLearningTimeSeries) |\n| 5   | [SelfTime](https://openreview.net/pdf?id=qFQTP00Q0kp) | Contrastive Learning               | 2021 | Submitted to ICLR | [github-link](https://github.com/haoyfan/SelfTime) |\n| 6   | [TimesNet](https://openreview.net/pdf?id=ju_Uqw384Oq) | CNN                                | 2023 | ICLR              | [github-link](https://github.com/thuml/TimesNet) |\n| 7   | [PatchTST](https://openreview.net/pdf?id=Jbdc0vTOcol) | Transformer                        | 2023 | ICLR              | [github-link](https://github.com/yuqinie98/PatchTST) |\n| 8   | [GPT4TS](https://arxiv.org/abs/2302.11939) | GPT2                               | 2023 | NeurIPS           | [github-link](https://github.com/DAMO-DI-ML/NeurIPS2023-One-Fits-All) |\n\n\n1. Pre-training and classification using **TS2Vec** model on a UCR dataset, run\n```bash \npython train_tsm.py --dataroot [your UCR datasets directory] --normalize_way single ...\n```\n\nFor detailed options and examples, please refer to ```ts2vec_cls/scripts/ts2vec_tsm_single_norm.sh```\n\n2. 
Pre-training and classification using **TS-TCC** model on a UCR dataset, run\n```bash \npython main_ucr.py --dataset [name of the ucr dataset] --device cuda:0 --save_csv_name tstcc_ucr_ --seed 42;\n```\n\nFor detailed options and examples, please refer to ```tstcc_cls/scripts/fivefold_tstcc_ucr.sh```\n\n3. To pre-train and classification using **TST** model on a UCR dataset, run\n```bash \npython src/main.py --dataset [dataset name] --data_dir [path of the dataset] --batch_size [batch size] --task pretrain_and_finetune --epochs\n```\n\nTo do classification task using Transformer encoder on a UCR dataset, run\n```bash\npython src/main.py --dataset [dataset name] --data_dir [path of the dataset] --batch_size [batch size] --task classification --epochs\n```\n\nFor detailed options and examples for training on the full UCR128 dataset, please refer to ```tst_cls/scripts/pretrain_finetune.sh``` and ```tst_cls/scripts/classification.sh```or simply run \n```bash\npython src/main.py -h\n```\n\n4. Pre-training and classification using **Triplet-loss** model on a UCR dataset, run\n```bash \npython ucr.py --dataset [name of the ucr dataset] --path [your UCR datasets directory] --hyper [hyperparameters file path(./default_hyperparameters.json for default option)] --cuda\n```\n\nFor detailed options and examples, please refer to ```tloss_cls/scripts/ucr.sh```\n\nPre-training and classification using **Triplet-loss** model on a UEA dataset, run\n```bash \npython uea.py --dataset [name of the uea dataset] --path [your UEA datasets directory] --hyper [hyperparameters file path(./default_hyperparameters.json for default option)] --cuda\n```\n\nFor detailed options and examples, please refer to ```tloss_cls/scripts/uea.sh```\n\n\n5. Pre-training and classification using **SelfTime** model on a UCR dataset, run\n```bash\npython -u train_ssl.py --dataset_name [dataset name] --model_name SelfTime --ucr_path [your UCR datasets directory] --random_seed 42\n```\n\n6. 
Pre-training and classification using **TimesNet** model on a UCR dataset, run\n```bash\npython -u main_timesnet_ucr.py --dataset_name [dataset name] --model_name SelfTime --ucr_path [your UCR datasets directory] --random_seed 42\n```\n\n\n7. Classification using **PatchTST** model on a UCR dataset, run\n```bash\npython -u main_patchtst_ucr.py --dataset_name [dataset name] --model_name SelfTime --ucr_path [your UCR datasets directory] --random_seed 42\n```\n\n8. Fine-tuning and classification using **GPT4TS** model on a UCR dataset, run\n```bash\npython -u main_gpt4ts_ucr.py --dataset_name [dataset name] --model_name SelfTime --ucr_path [your UCR datasets directory] --random_seed 42\n```\n\nFor detailed options and examples, please refer to ```selftime_cls/scripts/ucr.sh```\n\n### Usage (Visualization)\n* To get the visualization of model's feature map, run\n```bash\npython visualize.py --dataroot [your dataset root] --dataset [dataset name] --backbone [encoder backbone] --graph [cam, heatmap or tsne] \n```\n* We provide weights of Wine and GunPoint dataset for quick start."
  },
  {
    "path": "ts_classification_methods/data/__init__.py",
    "content": "from .preprocessing import *"
  },
  {
    "path": "ts_classification_methods/data/dataloader.py",
    "content": "import torch\nimport torch.utils.data as data\n\n\n# Dataset 仅用来加载5 fold中的一个fold\nclass UCRDataset(data.Dataset):\n    def __init__(self, dataset, target):\n        self.dataset = dataset\n        # self.dataset = np.expand_dims(self.dataset, 1)\n        if len(self.dataset.shape) == 2:\n            self.dataset = torch.unsqueeze(self.dataset, 1)  # (num_size, 1, series_length)\n        self.target = target\n\n    def __getitem__(self, index):\n        return self.dataset[index], self.target[index]\n\n    def __len__(self):\n        return len(self.target)\n\n\nclass UEADataset(data.Dataset):\n    def __init__(self, dataset, target):\n        self.dataset = dataset.permute(0, 2, 1)  # (num_size, num_dimensions, series_length)\n        self.target = target\n\n    def __getitem__(self, index):\n        return self.dataset[index], self.target[index]\n\n    def __len__(self):\n        return len(self.target)\n\n\nif __name__ == '__main__':\n    pass\n    '''\n    train = pd.read_csv('/dev_data/zzj/hzy/datasets/UCR/Adiac/Adiac_TRAIN.tsv', sep='\\t', header=None)\n\n    train_target = train.iloc[:, 0]\n    train_x = train.iloc[:, 1:]\n    print(train_x.to_numpy())\n    '''\n"
  },
  {
    "path": "ts_classification_methods/data/preprocessing.py",
    "content": "import os\n\nimport numpy as np\nimport pandas as pd\nfrom scipy.io.arff import loadarff\nfrom sklearn.model_selection import StratifiedKFold\nfrom tslearn.preprocessing import TimeSeriesScalerMeanVariance\n\n\ndef load_data(dataroot, dataset):\n    train = pd.read_csv(os.path.join(dataroot, dataset, dataset + '_TRAIN.tsv'), sep='\\t', header=None)\n    train_x = train.iloc[:, 1:]\n    train_target = train.iloc[:, 0]\n\n    test = pd.read_csv(os.path.join(dataroot, dataset, dataset + '_TEST.tsv'), sep='\\t', header=None)\n    test_x = test.iloc[:, 1:]\n    test_target = test.iloc[:, 0]\n\n    sum_dataset = pd.concat([train_x, test_x]).to_numpy(dtype=np.float32)\n    # sum_dataset = sum_dataset.fillna(sum_dataset.mean()).to_numpy(dtype=np.float32)\n    sum_target = pd.concat([train_target, test_target]).to_numpy(dtype=np.float32)\n    # sum_target = sum_target.fillna(sum_target.mean()).to_numpy(dtype=np.float32)\n\n    num_classes = len(np.unique(sum_target))\n\n    return sum_dataset, sum_target, num_classes\n\n\ndef load_UEA(dataroot, dataset):\n    '''\n    scipy 1.3.0 or newer is required to load. 
Otherwise, the data cannot be loaded.\n    '''\n    train_data = loadarff(os.path.join(dataroot, dataset, dataset + '_TRAIN.arff'))[0]\n    test_data = loadarff(os.path.join(dataroot, dataset, dataset + '_TEST.arff'))[0]\n\n    def extract_data(data):\n        res_data = []\n        res_labels = []\n        for t_data, t_label in data:\n            t_data = np.array([d.tolist() for d in t_data])\n            t_label = t_label.decode(\"utf-8\")\n            res_data.append(t_data)\n            res_labels.append(t_label)\n        return np.array(res_data).swapaxes(1, 2), np.array(res_labels)\n\n    train_X, train_y = extract_data(train_data)\n    test_X, test_y = extract_data(test_data)\n\n    labels = np.unique(train_y)\n    transform = {k: i for i, k in enumerate(labels)}\n    train_y = np.vectorize(transform.get)(train_y)\n    test_y = np.vectorize(transform.get)(test_y)\n    sum_dataset = np.concatenate((train_X, test_X), axis=0,\n                                 dtype=np.float32)  # (num_size, series_length, num_dimensions)\n    sum_target = np.concatenate((train_y, test_y), axis=0, dtype=np.float32)\n    num_classes = len(np.unique(sum_target))\n    return sum_dataset, sum_target, num_classes\n\n\ndef transfer_labels(labels):\n    indicies = np.unique(labels)\n    num_samples = labels.shape[0]\n\n    for i in range(num_samples):\n        new_label = np.argwhere(labels[i] == indicies)[0][0]\n        labels[i] = new_label\n\n    return labels\n\n\ndef k_fold(data, target):\n    skf = StratifiedKFold(5, shuffle=True)\n    # skf = StratifiedShuffleSplit(5)\n    train_sets = []\n    train_targets = []\n\n    val_sets = []\n    val_targets = []\n\n    test_sets = []\n    test_targets = []\n\n    for raw_index, test_index in skf.split(data, target):\n        raw_set = data[raw_index]\n        raw_target = target[raw_index]\n\n        test_sets.append(data[test_index])\n        test_targets.append(target[test_index])\n\n        train_index, val_index = 
next(StratifiedKFold(4, shuffle=True).split(raw_set, raw_target))\n        # train_index, val_index = next(StratifiedShuffleSplit(1).split(raw_set, raw_target))\n        train_sets.append(raw_set[train_index])\n        train_targets.append(raw_target[train_index])\n\n        val_sets.append(raw_set[val_index])\n        val_targets.append(raw_target[val_index])\n\n    return train_sets, train_targets, val_sets, val_targets, test_sets, test_targets\n\n\ndef normalize_per_series(data):\n    std_ = data.std(axis=1, keepdims=True)\n    std_[std_ == 0] = 1.0\n    return (data - data.mean(axis=1, keepdims=True)) / std_\n\n\ndef normalize_train_val_test(train_set, val_set, test_set):\n    mean = train_set.mean()\n    std = train_set.std()\n    return (train_set - mean) / std, (val_set - mean) / std, (test_set - mean) / std\n\n\ndef normalize_uea_set(data_set):\n    '''\n    The function is the same as normalize_per_series, but can be used for multiple variables.\n    '''\n    return TimeSeriesScalerMeanVariance().fit_transform(data_set)\n\n\ndef fill_nan_value(train_set, val_set, test_set):\n    ind = np.where(np.isnan(train_set))\n    col_mean = np.nanmean(train_set, axis=0)\n    col_mean[np.isnan(col_mean)] = 1e-6\n\n    train_set[ind] = np.take(col_mean, ind[1])\n\n    ind_val = np.where(np.isnan(val_set))\n    val_set[ind_val] = np.take(col_mean, ind_val[1])\n\n    ind_test = np.where(np.isnan(test_set))\n    test_set[ind_test] = np.take(col_mean, ind_test[1])\n    return train_set, val_set, test_set\n\n\nif __name__ == '__main__':\n    pass\n"
  },
  {
    "path": "ts_classification_methods/environment.yaml",
    "content": "name: from_transfer_to_transformer\nchannels:\n  - https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/free/\n  - https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/pytorch\n  - https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/main/\ndependencies:\n  - python=3.9.7\n  - numpy=1.21.2\n  - pytorch=1.10.2\n  - scikit-learn=1.0.2\n  - scipy=1.7.3\n  - pandas=1.4.1\n  - tslearn=0.5.2\n"
  },
  {
    "path": "ts_classification_methods/gpt4ts/__init__.py",
    "content": ""
  },
  {
    "path": "ts_classification_methods/gpt4ts/gpt4ts_utils.py",
    "content": "import os\nimport torch.utils.data as data\nimport numpy as np\nimport pandas as pd\nfrom scipy.io.arff import loadarff\nfrom sklearn.model_selection import StratifiedKFold\nfrom tslearn.preprocessing import TimeSeriesScalerMeanVariance\n\nimport random\nimport torch\nimport torch.nn as nn\n\n\ndef build_dataset(args):\n    sum_dataset, sum_target, num_classes = load_data(args.dataroot, args.dataset)\n\n    sum_target = transfer_labels(sum_target)\n    return sum_dataset, sum_target, num_classes\n\n\ndef load_data(dataroot, dataset):\n    train = pd.read_csv(os.path.join(dataroot, dataset, dataset + '_TRAIN.tsv'), sep='\\t', header=None)\n    train_x = train.iloc[:, 1:]\n    train_target = train.iloc[:, 0]\n\n    test = pd.read_csv(os.path.join(dataroot, dataset, dataset + '_TEST.tsv'), sep='\\t', header=None)\n    test_x = test.iloc[:, 1:]\n    test_target = test.iloc[:, 0]\n\n    sum_dataset = pd.concat([train_x, test_x]).to_numpy(dtype=np.float32)\n    sum_target = pd.concat([train_target, test_target]).to_numpy(dtype=np.float32)\n\n    num_classes = len(np.unique(sum_target))\n\n    return sum_dataset, sum_target, num_classes\n\n\ndef normalize_per_series(data):\n    std_ = data.std(axis=1, keepdims=True)\n    std_[std_ == 0] = 1.0\n    return (data - data.mean(axis=1, keepdims=True)) / std_\n\n\n\ndef load_UEA(dataroot, dataset):\n    '''\n    scipy 1.3.0 or newer is required to load. 
Otherwise, the data cannot be loaded.\n    '''\n    train_data = loadarff(os.path.join(dataroot, dataset, dataset + '_TRAIN.arff'))[0]\n    test_data = loadarff(os.path.join(dataroot, dataset, dataset + '_TEST.arff'))[0]\n\n    def extract_data(data_set):\n        res_data = []\n        res_labels = []\n        for t_data, t_label in data_set:\n            t_data = np.array([d.tolist() for d in t_data])\n            t_label = t_label.decode(\"utf-8\")\n            res_data.append(t_data)\n            res_labels.append(t_label)\n        return np.array(res_data).swapaxes(1, 2), np.array(res_labels)\n\n    train_X, train_y = extract_data(train_data)\n    test_X, test_y = extract_data(test_data)\n\n    labels = np.unique(train_y)\n    transform = {k: i for i, k in enumerate(labels)}\n    train_y = np.vectorize(transform.get)(train_y)\n    test_y = np.vectorize(transform.get)(test_y)\n    sum_dataset = np.concatenate((train_X, test_X), axis=0,\n                                 dtype=np.float32)  # (num_size, series_length, num_dimensions)\n    sum_target = np.concatenate((train_y, test_y), axis=0, dtype=np.float32)\n    num_classes = len(np.unique(sum_target))\n    return sum_dataset, sum_target, num_classes\n\n\ndef transfer_labels(labels):\n    indicies = np.unique(labels)\n    num_samples = labels.shape[0]\n\n    for i in range(num_samples):\n        new_label = np.argwhere(labels[i] == indicies)[0][0]\n        labels[i] = new_label\n\n    return labels\n\n\ndef k_fold(data_set, target):\n    skf = StratifiedKFold(5, shuffle=True)\n    # skf = StratifiedShuffleSplit(5)\n    train_sets = []\n    train_targets = []\n\n    val_sets = []\n    val_targets = []\n\n    test_sets = []\n    test_targets = []\n\n    for raw_index, test_index in skf.split(data_set, target):\n        raw_set = data_set[raw_index]\n        raw_target = target[raw_index]\n\n        test_sets.append(data_set[test_index])\n        test_targets.append(target[test_index])\n\n        train_index, 
val_index = next(StratifiedKFold(4, shuffle=True).split(raw_set, raw_target))\n        # train_index, val_index = next(StratifiedShuffleSplit(1).split(raw_set, raw_target))\n        train_sets.append(raw_set[train_index])\n        train_targets.append(raw_target[train_index])\n\n        val_sets.append(raw_set[val_index])\n        val_targets.append(raw_target[val_index])\n\n    return train_sets, train_targets, val_sets, val_targets, test_sets, test_targets\n\n\ndef normalize_uea_set(data_set):\n    '''\n    The function is the same as normalize_per_series, but can be used for multiple variables.\n    '''\n    return TimeSeriesScalerMeanVariance().fit_transform(data_set)\n\n\ndef fill_nan_value(train_set, val_set, test_set):\n    ind = np.where(np.isnan(train_set))\n    col_mean = np.nanmean(train_set, axis=0)\n    col_mean[np.isnan(col_mean)] = 1e-6\n\n    train_set[ind] = np.take(col_mean, ind[1])\n\n    ind_val = np.where(np.isnan(val_set))\n    val_set[ind_val] = np.take(col_mean, ind_val[1])\n\n    ind_test = np.where(np.isnan(test_set))\n    test_set[ind_test] = np.take(col_mean, ind_test[1])\n    return train_set, val_set, test_set\n\n\nclass UEADataset(data.Dataset):\n    def __init__(self, dataset, target):\n        self.dataset = dataset.permute(0, 2, 1)  # (num_size, num_dimensions, series_length)\n        self.target = target\n\n    def __getitem__(self, index):\n        return self.dataset[index], self.target[index]\n\n    def __len__(self):\n        return len(self.target)\n\n\ndef save_cls_new_result(args, mean_accu, max_acc, min_acc, std_acc, train_time):\n    save_path = os.path.join(args.save_dir, '', args.save_csv_name + '_sup_cls_result.csv')\n    if os.path.exists(save_path):\n        result_form = pd.read_csv(save_path, index_col=0)\n    else:\n        result_form = pd.DataFrame(\n            columns=['dataset_name', 'mean_accu', 'max_acc', 'min_acc', 'std_acc', 'train_time'])\n\n    result_form = result_form.append(\n        {'dataset_name': 
args.dataset, 'mean_accu': '%.4f' % mean_accu, 'max_acc': '%.4f' % max_acc,\n         'min_acc': '%.4f' % min_acc,\n         'std_acc': '%.4f' % std_acc,\n         'train_time': '%.4f' % train_time\n         }, ignore_index=True)\n\n    result_form.to_csv(save_path, index=True, index_label=\"id\")\n\n\ndef set_seed(args):\n    random.seed(args.random_seed)\n    np.random.seed(args.random_seed)\n    torch.manual_seed(args.random_seed)\n    torch.cuda.manual_seed(args.random_seed)\n    torch.cuda.manual_seed_all(args.random_seed)\n\n\ndef get_all_datasets(data_set, target):\n    return k_fold(data_set, target)\n\n\n\ndef cross_entropy():\n    loss = nn.CrossEntropyLoss()\n    return loss\n\n\ndef reconstruction_loss():\n    loss = nn.MSELoss()\n    return loss\n\n\ndef build_loss(args):\n    if args.loss == 'cross_entropy':\n        return cross_entropy()\n    elif args.loss == 'reconstruction':\n        return reconstruction_loss()"
  },
  {
    "path": "ts_classification_methods/gpt4ts/main_gpt4ts.py",
    "content": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\n\nimport argparse\nimport time\n\nimport numpy as np\nimport torch\n\nfrom torch.utils.data import DataLoader\n\nfrom gpt4ts.gpt4ts_utils import load_UEA, normalize_uea_set, UEADataset, save_cls_new_result, set_seed, fill_nan_value, get_all_datasets, build_loss\n\nfrom gpt4ts.models.gpt4ts import gpt4ts\n\n\ndef evaluate_gpt4ts(val_loader, model, loss):\n    val_loss = 0\n    val_accu = 0\n\n    sum_len = 0\n    for data, target in val_loader:\n        '''\n        data, target = data.to(device), target.to(device)\n        target = target.to(torch.int64)\n        '''\n        with torch.no_grad():\n            val_pred = model(data)\n            val_loss += loss(val_pred, target).item()\n            val_accu += torch.sum(torch.argmax(val_pred.data, axis=1) == target)\n            sum_len += len(target)\n\n    return val_loss / sum_len, val_accu / sum_len\n\n\nif __name__ == '__main__':  ##\n    parser = argparse.ArgumentParser()\n\n    # Base setup\n    parser.add_argument('--backbone', type=str, default='fcn', help='encoder backbone, fcn')\n    parser.add_argument('--random_seed', type=int, default=42, help='shuffle seed')\n\n    # Dataset setup\n    parser.add_argument('--dataset', type=str, default='LSST',\n                        help='dataset(in ucr)')  # LSST Heartbeat Images\n    # parser.add_argument('--dataroot', type=str, default='../UCRArchive_2018', help='path of UCR folder')\n    # parser.add_argument('--dataroot', type=str, default='/dev_data/lz/time_series_pretrain/datasets/UCRArchive_2018',\n    #                     help='path of UCR folder')\n    # parser.add_argument('--dataroot', type=str, default='/SSD/lz/UCRArchive_2018', help='path of UCR folder')\n    parser.add_argument('--dataroot', type=str, default='/dev_data/lz/Multivariate2018_arff', help='path of UEA folder')\n    
parser.add_argument('--num_classes', type=int, default=0, help='number of class')\n    parser.add_argument('--normalize_way', type=str, default='single', help='single or train_set')\n    parser.add_argument('--seq_len', type=int, default=46, help='seq_len')\n    parser.add_argument('--input_size', type=int, default=1, help='input_size')\n\n    parser.add_argument('--patch_size', type=int, default=8, help='patch_size')\n    parser.add_argument('--stride', type=int, default=8, help='stride')\n\n    # Semi training\n    parser.add_argument('--labeled_ratio', type=float, default='0.1', help='0.1, 0.2, 0.4')\n\n    # training setup\n    parser.add_argument('--loss', type=str, default='cross_entropy', help='loss function')\n    parser.add_argument('--optimizer', type=str, default='adam', help='optimizer')\n    parser.add_argument('--lr', type=float, default=0.001, help='learning rate')\n    parser.add_argument('--weight_decay', type=float, default=0.0, help='weight decay')\n    parser.add_argument('--batch_size', type=int, default=128, help='')\n    parser.add_argument('--epoch', type=int, default=100, help='training epoch')\n    parser.add_argument('--cuda', type=str, default='cuda:1')\n\n    parser.add_argument('--save_dir', type=str, default='/dev_data/lz/time_series_label_noise/result')\n    parser.add_argument('--save_csv_name', type=str, default='gpt4ts_uea_supervised_0712_')\n\n    # classifier setup\n    parser.add_argument('--classifier', type=str, default='linear', help='type of classifier(linear or nonlinear)')\n    parser.add_argument('--classifier_input', type=int, default=128, help='input dim of the classifiers')\n\n    args = parser.parse_args()\n\n    device = torch.device(args.cuda if torch.cuda.is_available() else \"cpu\")\n    set_seed(args)\n\n    # sum_dataset, sum_target, num_classes = build_dataset(args)\n    sum_dataset, sum_target, num_classes = load_UEA(args.dataroot, args.dataset)\n    # args.num_classes = num_classes\n    # args.seq_len = 
sum_dataset.shape[1]\n\n    args.num_classes = num_classes\n    args.seq_len = sum_dataset.shape[1]\n    args.input_size = sum_dataset.shape[2]\n\n    while sum_dataset.shape[0] * 0.6 < args.batch_size:\n        args.batch_size = args.batch_size // 2\n\n    print(\"args.batch_size = \", args.batch_size, \", sum_dataset.shape = \", sum_dataset.shape)\n\n    model = gpt4ts(max_seq_len=args.seq_len, num_classes=args.num_classes, var_len=args.input_size, patch_size=args.patch_size, stride=args.stride)\n    model = model.to(device)\n\n    # model, classifier = build_model(args)\n    # model, classifier = model.to(device), classifier.to(device)\n    loss = build_loss(args).to(device)\n\n    model_init_state = model.state_dict()\n    # classifier_init_state = classifier.state_dict()\n\n    if args.optimizer == 'adam':\n        optimizer = torch.optim.Adam([{'params': model.parameters()}],\n                                     lr=args.lr, weight_decay=args.weight_decay)\n    elif args.optimizer == 'sgd':\n        optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)\n\n    train_datasets, train_targets, val_datasets, val_targets, test_datasets, test_targets = get_all_datasets(\n        sum_dataset, sum_target)\n\n    losses = []\n    test_accuracies = []\n    train_time = 0.0\n    end_val_epochs = []\n\n    for i, train_dataset in enumerate(train_datasets):\n        t = time.time()\n        model.load_state_dict(model_init_state)\n        # classifier.load_state_dict(classifier_init_state)\n        print('{} fold start training and evaluate'.format(i))\n\n        train_target = train_targets[i]\n        val_dataset = val_datasets[i]\n        val_target = val_targets[i]\n\n        test_dataset = test_datasets[i]\n        test_target = test_targets[i]\n\n        train_dataset, val_dataset, test_dataset = fill_nan_value(train_dataset, val_dataset, test_dataset)\n\n        if args.normalize_way == 'single':\n            # TODO normalize 
per series\n            train_dataset = normalize_uea_set(train_dataset)\n            val_dataset = normalize_uea_set(val_dataset)\n            test_dataset = normalize_uea_set(test_dataset)\n        # else:\n        #     train_dataset, val_dataset, test_dataset = normalize_train_val_test(train_dataset, val_dataset,\n        #                                                                         test_dataset)\n\n        train_set = UEADataset(torch.from_numpy(train_dataset).type(torch.FloatTensor).to(device),\n                               torch.from_numpy(train_target).type(torch.FloatTensor).to(device).to(torch.int64))\n        val_set = UEADataset(torch.from_numpy(val_dataset).type(torch.FloatTensor).to(device),\n                             torch.from_numpy(val_target).type(torch.FloatTensor).to(device).to(torch.int64))\n        test_set = UEADataset(torch.from_numpy(test_dataset).type(torch.FloatTensor).to(device),\n                              torch.from_numpy(test_target).type(torch.FloatTensor).to(device).to(torch.int64))\n\n        train_loader = DataLoader(train_set, batch_size=args.batch_size, num_workers=0, drop_last=True)\n        val_loader = DataLoader(val_set, batch_size=args.batch_size, num_workers=0)\n        test_loader = DataLoader(test_set, batch_size=args.batch_size, num_workers=0)\n\n        train_loss = []\n        train_accuracy = []\n        num_steps = args.epoch // args.batch_size\n\n        last_loss = float('inf')\n        stop_count = 0\n        increase_count = 0\n\n        num_steps = train_set.__len__() // args.batch_size\n\n        min_val_loss = float('inf')\n        test_accuracy = 0\n        end_val_epoch = 0\n\n        for epoch in range(args.epoch):\n\n            if stop_count == 50 or increase_count == 50:\n                print('model convergent at epoch {}, early stopping'.format(epoch))\n                break\n\n            epoch_train_loss = 0\n            epoch_train_acc = 0\n            num_iterations = 0\n\n     
       model.train()\n            train_embed = []\n\n            for x, y in train_loader:\n                optimizer.zero_grad()\n                pred = model(x)\n                step_loss = loss(pred, y)\n\n                # step_loss.backward(retain_graph=True)\n                step_loss.backward()\n                optimizer.step()\n\n                epoch_train_loss += step_loss.item()\n                epoch_train_acc += torch.sum(torch.argmax(pred.data, axis=1) == y) / len(y)\n\n                num_iterations += 1\n\n            epoch_train_loss /= num_steps\n            epoch_train_acc /= num_steps\n            # train_embed = np.concatenate(train_embed)\n\n            model.eval()\n\n            val_loss, val_accu = evaluate_gpt4ts(val_loader, model, loss)\n            if min_val_loss > val_loss:\n                min_val_loss = val_loss\n                end_val_epoch = epoch\n                test_loss, test_accuracy = evaluate_gpt4ts(test_loader, model, loss)\n\n            if abs(last_loss - val_loss) <= 1e-4:\n                stop_count += 1\n            else:\n                stop_count = 0\n\n            if val_loss > last_loss:\n                increase_count += 1\n            else:\n                increase_count = 0\n\n            last_loss = val_loss\n\n            if epoch % 50 == 0:\n                print(\n                    \"epoch : {}, train loss: {} , train accuracy : {}, \\ntest_accuracy : {}\".format(\n                        epoch, epoch_train_loss, epoch_train_acc, test_accuracy))\n\n        test_accuracies.append(test_accuracy)\n        end_val_epochs.append(end_val_epoch)\n        t = time.time() - t\n        train_time += t\n\n        print('{} fold finish training'.format(i))\n\n    test_accuracies = torch.Tensor(test_accuracies)\n\n    print(\"Training end: mean_test_acc = \", round(torch.mean(test_accuracies).item(), 4),\n          \"traning time (seconds) = \",\n          round(train_time, 4), \", seed = \", args.random_seed)\n\n  
  test_accuracies = test_accuracies.cpu().numpy()\n\n    save_cls_new_result(args, np.mean(test_accuracies), np.max(test_accuracies), np.min(test_accuracies),\n                        np.std(test_accuracies), train_time)\n\n    print('Done!')\n"
  },
  {
    "path": "ts_classification_methods/gpt4ts/main_gpt4ts_ucr.py",
    "content": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\n\nimport argparse\nimport time\n\nimport numpy as np\nimport torch\n\nfrom torch.utils.data import DataLoader\n\nfrom gpt4ts.gpt4ts_utils import load_UEA, normalize_uea_set, UEADataset, save_cls_new_result, set_seed, fill_nan_value, get_all_datasets, build_loss, build_dataset, normalize_per_series\n\nfrom gpt4ts.models.gpt4ts import gpt4ts\n\n\ndef evaluate_gpt4ts(val_loader, model, loss):\n    val_loss = 0\n    val_accu = 0\n\n    sum_len = 0\n    for data, target in val_loader:\n        '''\n        data, target = data.to(device), target.to(device)\n        target = target.to(torch.int64)\n        '''\n        with torch.no_grad():\n            val_pred = model(data)\n            val_loss += loss(val_pred, target).item()\n            val_accu += torch.sum(torch.argmax(val_pred.data, axis=1) == target)\n            sum_len += len(target)\n\n    return val_loss / sum_len, val_accu / sum_len\n\n\nif __name__ == '__main__':  ##\n    parser = argparse.ArgumentParser()\n\n    # Base setup\n    parser.add_argument('--backbone', type=str, default='fcn', help='encoder backbone, fcn')\n    parser.add_argument('--random_seed', type=int, default=42, help='shuffle seed')\n\n    # Dataset setup\n    parser.add_argument('--dataset', type=str, default='CBF',\n                        help='dataset(in ucr)')  # LSST Heartbeat Images\n    # parser.add_argument('--dataroot', type=str, default='../UCRArchive_2018', help='path of UCR folder')\n    # parser.add_argument('--dataroot', type=str, default='/dev_data/lz/time_series_pretrain/datasets/UCRArchive_2018',\n    #                     help='path of UCR folder')\n    parser.add_argument('--dataroot', type=str, default='/dev_data/lz/UCRArchive_2018', help='path of UCR folder')\n    # parser.add_argument('--dataroot', type=str, default='/SSD/lz/Multivariate2018_arff', 
help='path of UEA folder')\n    parser.add_argument('--num_classes', type=int, default=0, help='number of class')\n    parser.add_argument('--normalize_way', type=str, default='single', help='single or train_set')\n    parser.add_argument('--seq_len', type=int, default=46, help='seq_len')\n    parser.add_argument('--input_size', type=int, default=1, help='input_size')\n\n    parser.add_argument('--patch_size', type=int, default=8, help='patch_size')\n    parser.add_argument('--stride', type=int, default=8, help='stride')\n\n    # Semi training\n    parser.add_argument('--labeled_ratio', type=float, default='0.1', help='0.1, 0.2, 0.4')\n\n    # training setup\n    parser.add_argument('--loss', type=str, default='cross_entropy', help='loss function')\n    parser.add_argument('--optimizer', type=str, default='adam', help='optimizer')\n    parser.add_argument('--lr', type=float, default=0.001, help='learning rate')\n    parser.add_argument('--weight_decay', type=float, default=0.0, help='weight decay')\n    parser.add_argument('--batch_size', type=int, default=128, help='')\n    parser.add_argument('--epoch', type=int, default=100, help='training epoch')\n    parser.add_argument('--cuda', type=str, default='cuda:1')\n\n    parser.add_argument('--save_dir', type=str, default='/dev_data/lz/time_series_label_noise/result')\n    parser.add_argument('--save_csv_name', type=str, default='gpt4ts_ucr_supervised_0712_')\n\n    # classifier setup\n    parser.add_argument('--classifier', type=str, default='linear', help='type of classifier(linear or nonlinear)')\n    parser.add_argument('--classifier_input', type=int, default=128, help='input dim of the classifiers')\n\n    args = parser.parse_args()\n\n    device = torch.device(args.cuda if torch.cuda.is_available() else \"cpu\")\n    set_seed(args)\n\n    sum_dataset, sum_target, num_classes = build_dataset(args)\n    sum_dataset = sum_dataset[:, :, np.newaxis]\n    # sum_dataset, sum_target, num_classes = 
load_UEA(args.dataroot, args.dataset)\n    # args.num_classes = num_classes\n    # args.seq_len = sum_dataset.shape[1]\n\n    args.num_classes = num_classes\n    args.seq_len = sum_dataset.shape[1]\n    args.input_size = sum_dataset.shape[2]\n\n    while sum_dataset.shape[0] * 0.6 < args.batch_size:\n        args.batch_size = args.batch_size // 2\n\n    print(\"args.batch_size = \", args.batch_size, \", sum_dataset.shape = \", sum_dataset.shape)\n\n    model = gpt4ts(max_seq_len=args.seq_len, num_classes=args.num_classes, var_len=args.input_size, patch_size=args.patch_size, stride=args.stride)\n    model = model.to(device)\n\n    # model, classifier = build_model(args)\n    # model, classifier = model.to(device), classifier.to(device)\n    loss = build_loss(args).to(device)\n\n    model_init_state = model.state_dict()\n    # classifier_init_state = classifier.state_dict()\n\n    if args.optimizer == 'adam':\n        optimizer = torch.optim.Adam([{'params': model.parameters()}],\n                                     lr=args.lr, weight_decay=args.weight_decay)\n    elif args.optimizer == 'sgd':\n        optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)\n\n    train_datasets, train_targets, val_datasets, val_targets, test_datasets, test_targets = get_all_datasets(\n        sum_dataset, sum_target)\n\n    losses = []\n    test_accuracies = []\n    train_time = 0.0\n    end_val_epochs = []\n\n    for i, train_dataset in enumerate(train_datasets):\n        t = time.time()\n        model.load_state_dict(model_init_state)\n        # classifier.load_state_dict(classifier_init_state)\n        print('{} fold start training and evaluate'.format(i))\n\n        train_target = train_targets[i]\n        val_dataset = val_datasets[i]\n        val_target = val_targets[i]\n\n        test_dataset = test_datasets[i]\n        test_target = test_targets[i]\n\n        train_dataset, val_dataset, test_dataset = fill_nan_value(train_dataset, 
val_dataset, test_dataset)\n\n        if args.normalize_way == 'single':\n            # TODO normalize per series\n            train_dataset = normalize_per_series(train_dataset)\n            val_dataset = normalize_per_series(val_dataset)\n            test_dataset = normalize_per_series(test_dataset)\n        # else:\n        #     train_dataset, val_dataset, test_dataset = normalize_train_val_test(train_dataset, val_dataset,\n        #                                                                         test_dataset)\n\n        train_set = UEADataset(torch.from_numpy(train_dataset).type(torch.FloatTensor).to(device),\n                               torch.from_numpy(train_target).type(torch.FloatTensor).to(device).to(torch.int64))\n        val_set = UEADataset(torch.from_numpy(val_dataset).type(torch.FloatTensor).to(device),\n                             torch.from_numpy(val_target).type(torch.FloatTensor).to(device).to(torch.int64))\n        test_set = UEADataset(torch.from_numpy(test_dataset).type(torch.FloatTensor).to(device),\n                              torch.from_numpy(test_target).type(torch.FloatTensor).to(device).to(torch.int64))\n\n        train_loader = DataLoader(train_set, batch_size=args.batch_size, num_workers=0, drop_last=True)\n        val_loader = DataLoader(val_set, batch_size=args.batch_size, num_workers=0)\n        test_loader = DataLoader(test_set, batch_size=args.batch_size, num_workers=0)\n\n        train_loss = []\n        train_accuracy = []\n        num_steps = args.epoch // args.batch_size\n\n        last_loss = float('inf')\n        stop_count = 0\n        increase_count = 0\n\n        num_steps = train_set.__len__() // args.batch_size\n\n        min_val_loss = float('inf')\n        test_accuracy = 0\n        end_val_epoch = 0\n\n        for epoch in range(args.epoch):\n\n            if stop_count == 50 or increase_count == 50:\n                print('model convergent at epoch {}, early stopping'.format(epoch))\n                
break\n\n            epoch_train_loss = 0\n            epoch_train_acc = 0\n            num_iterations = 0\n\n            model.train()\n            train_embed = []\n\n            for x, y in train_loader:\n                optimizer.zero_grad()\n                pred = model(x)\n                step_loss = loss(pred, y)\n\n                # step_loss.backward(retain_graph=True)\n                step_loss.backward()\n                optimizer.step()\n\n                epoch_train_loss += step_loss.item()\n                epoch_train_acc += torch.sum(torch.argmax(pred.data, axis=1) == y) / len(y)\n\n                num_iterations += 1\n\n            epoch_train_loss /= num_steps\n            epoch_train_acc /= num_steps\n            # train_embed = np.concatenate(train_embed)\n\n            model.eval()\n\n            val_loss, val_accu = evaluate_gpt4ts(val_loader, model, loss)\n            if min_val_loss > val_loss:\n                min_val_loss = val_loss\n                end_val_epoch = epoch\n                test_loss, test_accuracy = evaluate_gpt4ts(test_loader, model, loss)\n\n            if abs(last_loss - val_loss) <= 1e-4:\n                stop_count += 1\n            else:\n                stop_count = 0\n\n            if val_loss > last_loss:\n                increase_count += 1\n            else:\n                increase_count = 0\n\n            last_loss = val_loss\n\n            if epoch % 50 == 0:\n                print(\n                    \"epoch : {}, train loss: {} , train accuracy : {}, \\ntest_accuracy : {}\".format(\n                        epoch, epoch_train_loss, epoch_train_acc, test_accuracy))\n\n        test_accuracies.append(test_accuracy)\n        end_val_epochs.append(end_val_epoch)\n        t = time.time() - t\n        train_time += t\n\n        print('{} fold finish training'.format(i))\n\n    test_accuracies = torch.Tensor(test_accuracies)\n\n    print(\"Training end: mean_test_acc = \", round(torch.mean(test_accuracies).item(), 
4),\n          \"training time (seconds) = \",\n          round(train_time, 4), \", seed = \", args.random_seed)\n\n    test_accuracies = test_accuracies.cpu().numpy()\n\n    save_cls_new_result(args, np.mean(test_accuracies), np.max(test_accuracies), np.min(test_accuracies),\n                        np.std(test_accuracies), train_time)\n\n    print('Done!')\n"
  },
  {
    "path": "ts_classification_methods/gpt4ts/models/__init__.py",
    "content": ""
  },
  {
    "path": "ts_classification_methods/gpt4ts/models/embed.py",
    "content": "import torch\nimport torch.nn as nn\nimport math\n\n\nclass PositionalEmbedding(nn.Module):\n    def __init__(self, d_model, max_len=25000):\n        super(PositionalEmbedding, self).__init__()\n        # Compute the positional encodings once in log space.\n        pe = torch.zeros(max_len, d_model).float()\n        pe.require_grad = False\n\n        position = torch.arange(0, max_len).float().unsqueeze(1)\n        div_term = (torch.arange(0, d_model, 2).float()\n                    * -(math.log(10000.0) / d_model)).exp()\n\n        pe[:, 0::2] = torch.sin(position * div_term)\n        pe[:, 1::2] = torch.cos(position * div_term)\n\n        pe = pe.unsqueeze(0)\n        self.register_buffer('pe', pe)\n\n    def forward(self, x):\n        return self.pe[:, :x.size(1)]\n\n\nclass TokenEmbedding(nn.Module):\n    def __init__(self, c_in, d_model):\n        super(TokenEmbedding, self).__init__()\n        padding = 1 if torch.__version__ >= '1.5.0' else 2\n        self.tokenConv = nn.Conv1d(in_channels=c_in, out_channels=d_model,\n                                   kernel_size=3, padding=padding, padding_mode='circular', bias=False)\n        for m in self.modules():\n            if isinstance(m, nn.Conv1d):\n                nn.init.kaiming_normal_(\n                    m.weight, mode='fan_in', nonlinearity='leaky_relu')\n\n    def forward(self, x):\n        x = self.tokenConv(x.permute(0, 2, 1)).transpose(1, 2)\n        return x\n\n\nclass FixedEmbedding(nn.Module):\n    def __init__(self, c_in, d_model):\n        super(FixedEmbedding, self).__init__()\n\n        w = torch.zeros(c_in, d_model).float()\n        w.require_grad = False\n\n        position = torch.arange(0, c_in).float().unsqueeze(1)\n        div_term = (torch.arange(0, d_model, 2).float()\n                    * -(math.log(10000.0) / d_model)).exp()\n\n        w[:, 0::2] = torch.sin(position * div_term)\n        w[:, 1::2] = torch.cos(position * div_term)\n\n        self.emb = 
nn.Embedding(c_in, d_model)\n        self.emb.weight = nn.Parameter(w, requires_grad=False)\n\n    def forward(self, x):\n        return self.emb(x).detach()\n\n\nclass TemporalEmbedding(nn.Module):\n    def __init__(self, d_model, embed_type='fixed', freq='h'):\n        super(TemporalEmbedding, self).__init__()\n\n        minute_size = 4\n        hour_size = 24\n        weekday_size = 7\n        day_size = 32\n        month_size = 13\n\n        Embed = FixedEmbedding if embed_type == 'fixed' else nn.Embedding\n        if freq == 't':\n            self.minute_embed = Embed(minute_size, d_model)\n        self.hour_embed = Embed(hour_size, d_model)\n        self.weekday_embed = Embed(weekday_size, d_model)\n        self.day_embed = Embed(day_size, d_model)\n        self.month_embed = Embed(month_size, d_model)\n\n    def forward(self, x):\n        x = x.long()\n        minute_x = self.minute_embed(x[:, :, 4]) if hasattr(\n            self, 'minute_embed') else 0.\n        hour_x = self.hour_embed(x[:, :, 3])\n        weekday_x = self.weekday_embed(x[:, :, 2])\n        day_x = self.day_embed(x[:, :, 1])\n        month_x = self.month_embed(x[:, :, 0])\n\n        return hour_x + weekday_x + day_x + month_x + minute_x\n\n\nclass TimeFeatureEmbedding(nn.Module):\n    def __init__(self, d_model, embed_type='timeF', freq='h'):\n        super(TimeFeatureEmbedding, self).__init__()\n\n        freq_map = {'h': 4, 't': 5, 's': 6,\n                    'm': 1, 'a': 1, 'w': 2, 'd': 3, 'b': 3}\n        d_inp = freq_map[freq]\n        self.embed = nn.Linear(d_inp, d_model, bias=False)\n\n    def forward(self, x):\n        return self.embed(x)\n\n\nclass DataEmbedding(nn.Module):\n    def __init__(self, c_in, d_model, embed_type='fixed', freq='h', dropout=0.1):\n        super(DataEmbedding, self).__init__()\n\n        self.value_embedding = TokenEmbedding(c_in=c_in, d_model=d_model)\n        self.position_embedding = PositionalEmbedding(d_model=d_model)\n        
self.temporal_embedding = TemporalEmbedding(d_model=d_model, embed_type=embed_type,\n                                                    freq=freq) if embed_type != 'timeF' else TimeFeatureEmbedding(\n            d_model=d_model, embed_type=embed_type, freq=freq)\n        self.dropout = nn.Dropout(p=dropout)\n\n    def forward(self, x, x_mark):\n        if x_mark is None:\n            x = self.value_embedding(x) + self.position_embedding(x)\n        else:\n            x = self.value_embedding(\n                x) + self.temporal_embedding(x_mark) + self.position_embedding(x)\n        return self.dropout(x)\n\n\nclass DataEmbedding_wo_pos(nn.Module):\n    def __init__(self, c_in, d_model, embed_type='fixed', freq='h', dropout=0.1):\n        super(DataEmbedding_wo_pos, self).__init__()\n\n        self.value_embedding = TokenEmbedding(c_in=c_in, d_model=d_model)\n        self.position_embedding = PositionalEmbedding(d_model=d_model)\n        self.temporal_embedding = TemporalEmbedding(d_model=d_model, embed_type=embed_type,\n                                                    freq=freq) if embed_type != 'timeF' else TimeFeatureEmbedding(\n            d_model=d_model, embed_type=embed_type, freq=freq)\n        self.dropout = nn.Dropout(p=dropout)\n\n    def forward(self, x, x_mark):\n        if x_mark is None:\n            x = self.value_embedding(x)\n        else:\n            x = self.value_embedding(x) + self.temporal_embedding(x_mark)\n        return self.dropout(x)\n\n\nclass PatchEmbedding(nn.Module):\n    def __init__(self, d_model, patch_len, stride, dropout):\n        super(PatchEmbedding, self).__init__()\n        # Patching\n        self.patch_len = patch_len\n        self.stride = stride\n        self.padding_patch_layer = nn.ReplicationPad1d((0, stride))\n\n        # Backbone, Input encoding: projection of feature vectors onto a d-dim vector space\n        self.value_embedding = TokenEmbedding(patch_len, d_model)\n\n        # Positional embedding\n        
self.position_embedding = PositionalEmbedding(d_model)\n\n        # Residual dropout\n        self.dropout = nn.Dropout(dropout)\n\n    def forward(self, x):\n        # do patching\n        n_vars = x.shape[1]\n        x = self.padding_patch_layer(x)\n        x = x.unfold(dimension=-1, size=self.patch_len, step=self.stride)\n        x = torch.reshape(x, (x.shape[0] * x.shape[1], x.shape[2], x.shape[3]))\n        # Input encoding\n        x = self.value_embedding(x) + self.position_embedding(x)\n        return self.dropout(x), n_vars\n\nclass DataEmbedding_wo_time(nn.Module):\n    def __init__(self, c_in, d_model, embed_type='fixed', freq='h', dropout=0.1):\n        super(DataEmbedding_wo_time, self).__init__()\n\n        self.value_embedding = TokenEmbedding(c_in=c_in, d_model=d_model)\n        self.position_embedding = PositionalEmbedding(d_model=d_model)\n        self.dropout = nn.Dropout(p=dropout)\n\n    def forward(self, x):\n        x = self.value_embedding(x) + self.position_embedding(x)\n        return self.dropout(x)\n"
  },
  {
    "path": "ts_classification_methods/gpt4ts/models/gpt4ts.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom transformers.models.gpt2.modeling_gpt2 import GPT2Model\nfrom einops import rearrange\nfrom gpt4ts.models.embed import DataEmbedding\n\n\nclass gpt4ts(nn.Module):\n\n    def __init__(self, max_seq_len, num_classes, var_len, d_model=768, patch_size=8, stride=8, dropout=0.1):\n        super(gpt4ts, self).__init__()\n        self.pred_len = 0\n        self.seq_len = max_seq_len\n        self.max_len = max_seq_len\n        self.patch_size = patch_size\n        self.stride = stride\n        self.gpt_layers = 6\n        self.feat_dim = var_len\n        self.num_classes = num_classes\n        self.d_model = d_model\n\n        self.patch_num = (self.seq_len - self.patch_size) // self.stride + 1\n\n        self.padding_patch_layer = nn.ReplicationPad1d((0, self.stride))\n        self.patch_num += 1\n        self.enc_embedding = DataEmbedding(self.feat_dim * self.patch_size, d_model, dropout)\n\n        self.gpt2 = GPT2Model.from_pretrained('/SSD/lz/gpt2', output_attentions=True, output_hidden_states=True)\n        self.gpt2.h = self.gpt2.h[:self.gpt_layers]\n\n        self.gpt2 =  self.gpt2.apply(self.gpt2._init_weights)\n\n        for i, (name, param) in enumerate(self.gpt2.named_parameters()):\n            if 'ln' in name or 'wpe' in name:\n                param.requires_grad = True\n                # param.requires_grad = False\n            else:\n                param.requires_grad = False\n\n        device = torch.device('cuda:{}'.format(0))\n        self.gpt2.to(device=device)\n\n        self.act = F.gelu\n        self.dropout = nn.Dropout(0.1)\n        # self.ln_proj = nn.LayerNorm(config['d_model'] * self.patch_num)\n\n        self.ln_proj = nn.LayerNorm(d_model * self.patch_num)\n        self.out_layer = nn.Linear(d_model * self.patch_num, self.num_classes)\n\n    def forward(self, x_enc, x_mark_enc=None):\n        x_enc = x_enc.permute(0,2,1)\n        B, L, M = 
x_enc.shape\n\n        # print(\"x_enc.shape = \", x_enc.shape, B, L, M)\n\n        input_x = rearrange(x_enc, 'b l m -> b m l')\n        # print(\"input_x.shape = \", input_x.shape)\n        input_x = self.padding_patch_layer(input_x)\n        # print(\"patch1 input_x.shape = \", input_x.shape)\n        input_x = input_x.unfold(dimension=-1, size=self.patch_size, step=self.stride)\n        # print(\"patch2 input_x.shape = \", input_x.shape)\n        input_x = rearrange(input_x, 'b m n p -> b n (p m)')\n        # print(\"patch3 input_x.shape = \", input_x.shape)\n        outputs = self.enc_embedding(input_x, None)\n        # print(\"patch4 embd input_x.shape = \", outputs.shape)\n        outputs = self.gpt2(inputs_embeds=outputs).last_hidden_state\n        # print(\"patch5 gpt2 embd input_x.shape = \", outputs.shape)\n        outputs = self.act(outputs).reshape(B, -1)\n        outputs = self.ln_proj(outputs)\n        outputs = self.out_layer(outputs)\n\n        return outputs\n\n"
  },
  {
    "path": "ts_classification_methods/gpt4ts/models/loss.py",
    "content": "import torch\nimport torch.nn as nn\nfrom torch.nn import functional as F\n\n\ndef get_loss_module(config):\n\n    task = config['task']\n\n    if (task == \"imputation\") or (task == \"transduction\"):\n        return MaskedMSELoss(reduction='none')  # outputs loss for each batch element\n\n    if task == \"classification\":\n        return NoFussCrossEntropyLoss(reduction='none')  # outputs loss for each batch sample\n\n    if task == \"regression\":\n        return nn.MSELoss(reduction='none')  # outputs loss for each batch sample\n\n    else:\n        raise ValueError(\"Loss module for task '{}' does not exist\".format(task))\n\n\ndef l2_reg_loss(model):\n    \"\"\"Returns the squared L2 norm of output layer of given model\"\"\"\n\n    for name, param in model.named_parameters():\n        if name == 'output_layer.weight':\n            return torch.sum(torch.square(param))\n\n\nclass NoFussCrossEntropyLoss(nn.CrossEntropyLoss):\n    \"\"\"\n    pytorch's CrossEntropyLoss is fussy: 1) needs Long (int64) targets only, and 2) only 1D.\n    This function satisfies these requirements\n    \"\"\"\n\n    def forward(self, inp, target):\n        return F.cross_entropy(inp, target.long().squeeze(), weight=self.weight,\n                               ignore_index=self.ignore_index, reduction=self.reduction)\n\n\nclass MaskedMSELoss(nn.Module):\n    \"\"\" Masked MSE Loss\n    \"\"\"\n\n    def __init__(self, reduction: str = 'mean'):\n\n        super().__init__()\n\n        self.reduction = reduction\n        self.mse_loss = nn.MSELoss(reduction=self.reduction)\n\n    def forward(self,\n                y_pred: torch.Tensor, y_true: torch.Tensor, mask: torch.BoolTensor) -> torch.Tensor:\n        \"\"\"Compute the loss between a target value and a prediction.\n\n        Args:\n            y_pred: Estimated values\n            y_true: Target values\n            mask: boolean tensor with 0s at places where values should be ignored and 1s where they should be 
considered\n\n        Returns\n        -------\n        if reduction == 'none':\n            (num_active,) Loss for each active batch element as a tensor with gradient attached.\n        if reduction == 'mean':\n            scalar mean loss over batch as a tensor with gradient attached.\n        \"\"\"\n\n        # for this particular loss, one may also elementwise multiply y_pred and y_true with the inverted mask\n        masked_pred = torch.masked_select(y_pred, mask)\n        masked_true = torch.masked_select(y_true, mask)\n\n        return self.mse_loss(masked_pred, masked_true)\n"
  },
  {
    "path": "ts_classification_methods/gpt4ts/scripts/generator_gpt4ts.py",
    "content": "uea_all = ['ArticularyWordRecognition', 'AtrialFibrillation', 'BasicMotions', 'CharacterTrajectories',\n           'Cricket', 'DuckDuckGeese', 'EigenWorms', 'Epilepsy', 'EthanolConcentration', 'ERing',\n           'FaceDetection', 'FingerMovements', 'HandMovementDirection', 'Handwriting',\n           'Heartbeat', 'InsectWingbeat', 'JapaneseVowels', 'Libras', 'LSST', 'MotorImagery',\n           'NATOPS', 'PenDigits', 'PEMS-SF', 'PhonemeSpectra', 'RacketSports', 'SelfRegulationSCP1',\n           'SelfRegulationSCP2', 'SpokenArabicDigits', 'StandWalkJump', 'UWaveGestureLibrary']\n\nucr_dataset = ['ACSF1', 'Adiac', 'AllGestureWiimoteX', 'AllGestureWiimoteY', 'AllGestureWiimoteZ', 'ArrowHead', 'BME',\n               'Beef',\n               'BeetleFly', 'BirdChicken', 'CBF', 'Car', 'Chinatown', 'ChlorineConcentration', 'CinCECGTorso', 'Coffee',\n               'Computers',\n               'CricketX', 'CricketY', 'CricketZ', 'Crop', 'DiatomSizeReduction', 'DistalPhalanxOutlineAgeGroup',\n               'DistalPhalanxOutlineCorrect', 'DistalPhalanxTW', 'DodgerLoopDay', 'DodgerLoopGame', 'DodgerLoopWeekend',\n               'ECG200', 'ECG5000', 'ECGFiveDays', 'EOGHorizontalSignal', 'EOGVerticalSignal', 'Earthquakes',\n               'ElectricDevices',\n               'EthanolLevel', 'FaceAll', 'FaceFour', 'FacesUCR', 'FiftyWords', 'Fish', 'FordA', 'FordB',\n               'FreezerRegularTrain',\n               'FreezerSmallTrain', 'Fungi', 'GestureMidAirD1', 'GestureMidAirD2', 'GestureMidAirD3', 'GesturePebbleZ1',\n               'GesturePebbleZ2', 'GunPoint', 'GunPointAgeSpan', 'GunPointMaleVersusFemale', 'GunPointOldVersusYoung',\n               'Ham',\n               'HandOutlines', 'Haptics', 'Herring', 'HouseTwenty', 'InlineSkate', 'InsectEPGRegularTrain',\n               'InsectEPGSmallTrain',\n               'InsectWingbeatSound', 'ItalyPowerDemand', 'LargeKitchenAppliances', 'Lightning2', 'Lightning7',\n               'Mallat', 'Meat',\n              
 'MedicalImages', 'MelbournePedestrian', 'MiddlePhalanxOutlineAgeGroup', 'MiddlePhalanxOutlineCorrect',\n               'MiddlePhalanxTW', 'MixedShapesRegularTrain', 'MixedShapesSmallTrain', 'MoteStrain',\n               'NonInvasiveFetalECGThorax1',\n               'NonInvasiveFetalECGThorax2', 'OSULeaf', 'OliveOil', 'PLAID', 'PhalangesOutlinesCorrect', 'Phoneme',\n               'PickupGestureWiimoteZ', 'PigAirwayPressure', 'PigArtPressure', 'PigCVP', 'Plane', 'PowerCons',\n               'ProximalPhalanxOutlineAgeGroup', 'ProximalPhalanxOutlineCorrect', 'ProximalPhalanxTW',\n               'RefrigerationDevices',\n               'Rock', 'ScreenType', 'SemgHandGenderCh2', 'SemgHandMovementCh2', 'SemgHandSubjectCh2',\n               'ShakeGestureWiimoteZ',\n               'ShapeletSim', 'ShapesAll', 'SmallKitchenAppliances', 'SmoothSubspace', 'SonyAIBORobotSurface1',\n               'SonyAIBORobotSurface2', 'StarLightCurves', 'Strawberry', 'SwedishLeaf', 'Symbols', 'SyntheticControl',\n               'ToeSegmentation1', 'ToeSegmentation2', 'Trace', 'TwoLeadECG', 'TwoPatterns', 'UMD',\n               'UWaveGestureLibraryAll',\n               'UWaveGestureLibraryX', 'UWaveGestureLibraryY', 'UWaveGestureLibraryZ', 'Wafer', 'Wine', 'WordSynonyms',\n               'Worms',\n               'WormsTwoClass', 'Yoga']\n\ndataset_list = list(ucr_dataset)\nprint(\"len = \", len(dataset_list))\n\n# dataset_list = ['HouseTwenty']\n\ncode_main = 'main_gpt4ts_ucr'  ### main_gpt4ts   multi_hydra_uea_test  multi_rocket_uea_test multi_hydra_ucr_test multi_rocket_ucr_test\n\n"
  },
  {
    "path": "ts_classification_methods/model/__init__.py",
    "content": "from .tsm_model import *\n"
  },
  {
    "path": "ts_classification_methods/model/loss.py",
    "content": "import torch.nn as nn\n\n\ndef cross_entropy():\n    loss = nn.CrossEntropyLoss()\n    return loss\n\n\ndef reconstruction_loss():\n    loss = nn.MSELoss()\n    return loss\n"
  },
  {
    "path": "ts_classification_methods/model/tsm_model.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.utils as utils\n\n\n# (B, C, T) -> (B, C, T-s)\nclass Chomp1d(nn.Module):\n    def __init__(self, chomp_size):\n        super(Chomp1d, self).__init__()\n        self.chomp_size = chomp_size\n\n    def forward(self, x):\n        return x[:, :, :-self.chomp_size]\n\n\nclass SqueezeChannels(nn.Module):\n    def __init__(self):\n        super(SqueezeChannels, self).__init__()\n\n    def forward(self, x):\n        return x.squeeze(2)\n\n\nclass FCN(nn.Module):\n    def __init__(self, num_classes, input_size=1):\n        super(FCN, self).__init__()\n\n        self.num_classes = num_classes\n        self.conv_block1 = nn.Sequential(\n            nn.Conv1d(in_channels=input_size, out_channels=128,\n                      kernel_size=8, padding='same'),\n            nn.BatchNorm1d(128),\n            nn.ReLU()\n        )\n\n        self.conv_block2 = nn.Sequential(\n            nn.Conv1d(in_channels=128, out_channels=256,\n                      kernel_size=5, padding='same'),\n            nn.BatchNorm1d(256),\n            nn.ReLU()\n        )\n\n        self.conv_block3 = nn.Sequential(\n            nn.Conv1d(in_channels=256, out_channels=128,\n                      kernel_size=3, padding='same'),\n            nn.BatchNorm1d(128),\n            nn.ReLU()\n        )\n\n        self.network = nn.Sequential(\n            self.conv_block1,\n            self.conv_block2,\n            self.conv_block3,\n            nn.AdaptiveAvgPool1d(1),\n            SqueezeChannels(),\n\n        )\n\n    def forward(self, x, vis=False):\n        if vis:\n            with torch.no_grad():\n                vis_out = self.conv_block1(x)\n                vis_out = self.conv_block2(vis_out)\n                vis_out = self.conv_block3(vis_out)\n                return self.network(x), vis_out\n        return self.network(x)\n\n\nclass DilatedBlock(nn.Module):\n\n    def __init__(self, in_channels, out_channels, kernel_size, dilation, 
final=False):\n        super(DilatedBlock, self).__init__()\n\n        padding = (kernel_size - 1) * dilation\n\n        self.conv_block1 = nn.Sequential(\n            utils.weight_norm(nn.Conv1d(in_channels, out_channels, kernel_size,\n                                        padding=padding, dilation=dilation)),\n            Chomp1d(padding),\n            nn.LeakyReLU()\n        )\n\n        self.conv_block2 = nn.Sequential(\n            utils.weight_norm(nn.Conv1d(out_channels, out_channels, kernel_size,\n                                        padding=padding, dilation=dilation)),\n            Chomp1d(padding),\n            nn.LeakyReLU()\n        )\n\n        # whether apply residual connection\n        self.upordownsample = torch.nn.Conv1d(\n            in_channels, out_channels, 1\n        ) if in_channels != out_channels else None\n\n        self.relu = torch.nn.LeakyReLU() if final else None\n\n    def forward(self, x):\n        out = self.conv_block1(x)\n        out = self.conv_block2(out)\n\n        res = x if self.upordownsample is None else self.upordownsample(x)\n\n        if self.relu is None:\n            return out + res\n        else:\n            return self.relu(out + res)\n\n\nclass DilatedConvolution(nn.Module):\n    def __init__(self, in_channels, embedding_channels, out_channels, depth, reduced_size, kernel_size,\n                 num_classes) -> None:\n        super(DilatedConvolution, self).__init__()\n\n        layers = []\n        # dilation size will be doubled at each step according to TLoss\n        dilation_size = 1\n\n        for i in range(depth):\n            block_in_channels = in_channels if i == 0 else embedding_channels\n            layers += [DilatedBlock(block_in_channels,\n                                    embedding_channels, kernel_size, dilation_size)]\n            dilation_size *= 2\n\n        layers += [DilatedBlock(embedding_channels, reduced_size,\n                                kernel_size, dilation_size, 
final=True)]\n\n        self.global_average_pool = nn.AdaptiveAvgPool1d(1)\n\n        # 注意， dilated中用的是global max pool\n        self.network = nn.Sequential(*layers,\n                                     nn.AdaptiveMaxPool1d(1),\n                                     SqueezeChannels(),\n                                     nn.Linear(reduced_size, out_channels),\n                                     )\n\n    def forward(self, x, vis=False):\n        if vis:\n            with torch.no_grad():\n                return self.network(x), nn.Sequential(*self.layers)(x)\n        return self.network(x)\n\n\nclass DilatedConvolutionVis(nn.Module):\n    def __init__(self, in_channels, embedding_channels, out_channels, depth, reduced_size, kernel_size,\n                 num_classes) -> None:\n        super(DilatedConvolutionVis, self).__init__()\n\n        self.layers = []\n        # dilation size will be doubled at each step according to TLoss\n        dilation_size = 1\n\n        for i in range(depth):\n            block_in_channels = in_channels if i == 0 else embedding_channels\n            self.layers += [DilatedBlock(block_in_channels,\n                                         embedding_channels, kernel_size, dilation_size)]\n            dilation_size *= 2\n\n        self.layers += [DilatedBlock(embedding_channels, reduced_size,\n                                     kernel_size, dilation_size, final=True)]\n\n        self.global_average_pool = nn.AdaptiveAvgPool1d(1)\n\n        # 注意， dilated中用的是global max pool\n        self.network = nn.Sequential(*self.layers,\n                                     nn.AdaptiveMaxPool1d(1),\n                                     SqueezeChannels(),\n                                     # nn.Linear(reduced_size, out_channels),\n                                     )\n\n    def forward(self, x, vis=False):\n        if vis:\n            with torch.no_grad():\n                return self.network(x), nn.Sequential(*self.layers)(x)\n        return 
self.network(x)\n\n\nclass Classifier(nn.Module):\n    def __init__(self, input_dims, output_dims) -> None:\n        super(Classifier, self).__init__()\n\n        self.dense = nn.Linear(input_dims, output_dims)\n        self.softmax = nn.Softmax(dim=1)\n\n    def forward(self, x):\n        return self.softmax(self.dense(x))\n\n\nclass NonLinearClassifier(nn.Module):\n    def __init__(self, input_dim, embedding_dim, output_dim, dropout=0.2) -> None:\n        super(NonLinearClassifier, self).__init__()\n\n        self.net = nn.Sequential(\n            nn.Linear(input_dim, embedding_dim),\n            nn.BatchNorm1d(embedding_dim),\n            nn.ReLU(),\n            nn.Dropout(dropout),\n            nn.Linear(embedding_dim, output_dim),\n            nn.Softmax(dim=1)\n        )\n\n    def forward(self, x):\n        return self.net(x)\n\n\nclass NonLinearClassifierVis(nn.Module):\n    def __init__(self, input_dim, embedding_dim, output_dim, dropout=0.2) -> None:\n        super(NonLinearClassifierVis, self).__init__()\n\n        self.dense = nn.Linear(input_dim, embedding_dim)\n        self.batchnorm = nn.BatchNorm1d(embedding_dim)\n        self.relu = nn.ReLU()\n        self.dropout = nn.Dropout(dropout)\n        self.dense2 = nn.Linear(embedding_dim, output_dim)\n\n        self.net = nn.Sequential(\n            self.dense,\n            self.batchnorm,\n            self.relu,\n            self.dropout,\n            self.dense2,\n            nn.Softmax(dim=1)\n        )\n\n    def forward(self, x, vis=False):\n        if vis:\n            with torch.no_grad():\n                x_out = self.dense(x)\n                x_out = self.batchnorm(x_out)\n                x_out = self.relu(x_out)\n                x_out = self.dropout(x_out)\n                return self.net(x), x_out\n        return self.net(x)\n\n\n# for single step\nclass RNNDecoder(nn.Module):\n    def __init__(self, input_dim=1, embedding_dim=128) -> None:\n        super(RNNDecoder, self).__init__()\n        
self.grucell1 = nn.GRUCell(\n            input_size=input_dim, hidden_size=embedding_dim)\n        self.grucell2 = nn.GRUCell(\n            input_size=embedding_dim, hidden_size=embedding_dim)\n        self.grucell3 = nn.GRUCell(\n            input_size=embedding_dim, hidden_size=embedding_dim)\n\n        self.linear = nn.Linear(in_features=embedding_dim, out_features=input_dim)\n\n    # x : single time step (batch_size, 1)\n    # TODO 是否将训练循环改到train.py中\n    def forward(self, h1, h2, h3, x):\n        hidden1 = self.grucell1(x, h1)\n        hidden2 = self.grucell2(hidden1, h2)\n        hidden3 = self.grucell3(hidden2, h3)\n\n        out = self.linear(hidden3)\n\n        return hidden1, hidden2, hidden3, out\n\n\ndef conv_out_len(seq_len, ker_size, stride, dilation, stack):\n    i = 0\n    for _ in range(stack):\n        seq_len = int(\n            (seq_len + (ker_size[i] - 1) - dilation * (ker_size[i] - 1) - 1) / stride + 1)\n        i = i + 1\n    return seq_len\n\n\nclass FCNDecoder(nn.Module):\n    # The formula for padding='SAME'，padding = (kernel_size - 1) / 2\n    # Ref: https://blog.csdn.net/crystal_sugar/article/details/105547838, http://www.itsnl.cn/16590.html\n    def __init__(self, num_classes, seq_len=None, input_size=None):\n        super(FCNDecoder, self).__init__()\n\n        self.num_classes = num_classes\n        self.compressed_len = conv_out_len(seq_len=seq_len, ker_size=[\n            3, 5, 7], stride=1, dilation=1, stack=3)\n\n        self.conv_trans_block1 = nn.Sequential(\n            nn.ConvTranspose1d(in_channels=128, out_channels=128,\n                               kernel_size=3, padding=1, output_padding=0),\n            nn.BatchNorm1d(128),\n            nn.ReLU()\n        )\n\n        self.conv_trans_block2 = nn.Sequential(\n            nn.ConvTranspose1d(in_channels=128, out_channels=256,\n                               kernel_size=5, padding=2, output_padding=0),\n            nn.BatchNorm1d(256),\n            nn.ReLU()\n        )\n\n  
      self.conv_trans_block3 = nn.Sequential(\n            nn.ConvTranspose1d(in_channels=256, out_channels=128,\n                               kernel_size=7, padding=3, output_padding=0),\n            nn.BatchNorm1d(128),\n            nn.ReLU()\n        )\n\n        self.network = nn.Sequential(\n            self.conv_trans_block1,\n            self.conv_trans_block2,\n            self.conv_trans_block3,\n\n        )\n\n        self.upsample = nn.Linear(1, self.compressed_len)\n        self.conv1x1 = nn.Conv1d(128, input_size, 1)\n\n    def forward(self, x):\n        if len(x.shape) == 2:\n            x = x.unsqueeze(2)\n        x = self.upsample(x)\n        x = self.network(x)\n        x = self.conv1x1(x)\n        return x\n\n\nif __name__ == '__main__':\n    pass\n\n# TODO\n# add args（depth, in_channels, out_channels, reduced_size, embedding_channels, kernel_size  in train.py\n# finish dataloader.py\n"
  },
  {
    "path": "ts_classification_methods/patchtst/__init__.py",
    "content": ""
  },
  {
    "path": "ts_classification_methods/patchtst/main_patchtst_iota.py",
    "content": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\n\nimport argparse\nimport time\n\nimport numpy as np\nimport torch\n\nfrom torch.utils.data import DataLoader\n\nfrom gpt4ts.gpt4ts_utils import load_UEA, normalize_uea_set, UEADataset, save_cls_new_result, set_seed, fill_nan_value, get_all_datasets, build_loss\n\nfrom gpt4ts.models.gpt4ts import gpt4ts\n\nfrom patchtst.models.patchTST import PatchTST\nfrom patchtst.patch_mask import PatchCB\n\n\ndef create_patch(xb, patch_len, stride):\n    \"\"\"\n    xb: [bs x seq_len x n_vars]\n    \"\"\"\n    seq_len = xb.shape[1]\n    num_patch = (max(seq_len, patch_len) - patch_len) // stride + 1\n    tgt_len = patch_len + stride * (num_patch - 1)\n    s_begin = seq_len - tgt_len\n\n    xb = xb[:, s_begin:, :]  # xb: [bs x tgt_len x nvars]\n    xb = xb.unfold(dimension=1, size=patch_len, step=stride)  # xb: [bs x num_patch x n_vars x patch_len]\n    return xb, num_patch\n\n\ndef evaluate_gpt4ts(args, val_loader, model, loss):\n    val_loss = 0\n    val_accu = 0\n\n    sum_len = 0\n    for data, target in val_loader:\n        '''\n        data, target = data.to(device), target.to(device)\n        target = target.to(torch.int64)\n        '''\n        with torch.no_grad():\n            xb, num_patch = create_patch(xb=data.permute(0, 2, 1), patch_len=args.patch_len, stride=args.stride)\n            val_pred = model(xb)\n            val_loss += loss(val_pred, target).item()\n            val_accu += torch.sum(torch.argmax(val_pred.data, axis=1) == target)\n            sum_len += len(target)\n\n    return val_loss / sum_len, val_accu / sum_len\n\n\nif __name__ == '__main__':  ##\n    parser = argparse.ArgumentParser()\n\n    # Base setup\n    parser.add_argument('--backbone', type=str, default='fcn', help='encoder backbone, fcn')\n    parser.add_argument('--random_seed', type=int, default=42, help='shuffle seed')\n\n    # 
Dataset setup\n    parser.add_argument('--dataset', type=str, default='LSST',\n                        help='dataset(in ucr)')  # LSST Heartbeat Images\n    # parser.add_argument('--dataroot', type=str, default='../UCRArchive_2018', help='path of UCR folder')\n    # parser.add_argument('--dataroot', type=str, default='/dev_data/lz/time_series_pretrain/datasets/UCRArchive_2018',\n    #                     help='path of UCR folder')\n    # parser.add_argument('--dataroot', type=str, default='/SSD/lz/UCRArchive_2018', help='path of UCR folder')\n    parser.add_argument('--dataroot', type=str, default='/dev_data/lz/Multivariate2018_arff', help='path of UEA folder')\n    parser.add_argument('--num_classes', type=int, default=0, help='number of class')\n    parser.add_argument('--normalize_way', type=str, default='single', help='single or train_set')\n    parser.add_argument('--seq_len', type=int, default=46, help='seq_len')\n    parser.add_argument('--input_size', type=int, default=1, help='input_size')\n\n    # parser.add_argument('--patch_size', type=int, default=8, help='patch_size')\n    # parser.add_argument('--stride', type=int, default=8, help='stride')\n\n    parser.add_argument('--target_points', type=int, default=96, help='forecast horizon')\n\n    # Patch\n    parser.add_argument('--patch_len', type=int, default=8, help='patch length')\n    parser.add_argument('--stride', type=int, default=8, help='stride between patch')\n\n    # RevIN\n    parser.add_argument('--revin', type=int, default=1, help='reversible instance normalization')\n    # Model args\n    parser.add_argument('--n_layers', type=int, default=3, help='number of Transformer layers')\n    parser.add_argument('--n_heads', type=int, default=16, help='number of Transformer heads')\n    parser.add_argument('--d_model', type=int, default=128, help='Transformer d_model')\n    parser.add_argument('--d_ff', type=int, default=256, help='Tranformer MLP dimension')\n    parser.add_argument('--dropout', 
type=float, default=0.2, help='Transformer dropout')\n    parser.add_argument('--head_dropout', type=float, default=0, help='head dropout')\n\n    # Semi training\n    parser.add_argument('--labeled_ratio', type=float, default='0.1', help='0.1, 0.2, 0.4')\n\n    # training setup\n    parser.add_argument('--loss', type=str, default='cross_entropy', help='loss function')\n    parser.add_argument('--optimizer', type=str, default='adam', help='optimizer')\n    parser.add_argument('--lr', type=float, default=0.001, help='learning rate')\n    parser.add_argument('--weight_decay', type=float, default=0.0, help='weight decay')\n    parser.add_argument('--batch_size', type=int, default=128, help='')\n    parser.add_argument('--epoch', type=int, default=100, help='training epoch')\n    parser.add_argument('--cuda', type=str, default='cuda:0')\n\n    parser.add_argument('--save_dir', type=str, default='/dev_data/lz/time_series_label_noise/result')\n    parser.add_argument('--save_csv_name', type=str, default='patchtst_supervised_patch8_1224_')\n\n    # classifier setup\n    parser.add_argument('--classifier', type=str, default='linear', help='type of classifier(linear or nonlinear)')\n    parser.add_argument('--classifier_input', type=int, default=128, help='input dim of the classifiers')\n\n    args = parser.parse_args()\n\n    device = torch.device(args.cuda if torch.cuda.is_available() else \"cpu\")\n    set_seed(args)\n\n    # sum_dataset, sum_target, num_classes = build_dataset(args)\n    sum_dataset, sum_target, num_classes = load_UEA(args.dataroot, args.dataset)\n    # args.num_classes = num_classes\n    # args.seq_len = sum_dataset.shape[1]\n\n    args.num_classes = num_classes\n    args.seq_len = sum_dataset.shape[1]\n    args.input_size = sum_dataset.shape[2]\n\n    # get number of patches\n    num_patch = (max(args.seq_len, args.patch_len) - args.patch_len) // args.stride + 1\n    print('number of patches:', num_patch)\n\n    while sum_dataset.shape[0] * 0.6 < 
args.batch_size:\n        args.batch_size = args.batch_size // 2\n\n    print(\"args.batch_size = \", args.batch_size, \", sum_dataset.shape = \", sum_dataset.shape)\n\n    # get model\n    model = PatchTST(c_in=args.input_size,\n                     target_dim=args.target_points,\n                     patch_len=args.patch_len,\n                     stride=args.stride,\n                     num_patch=num_patch,\n                     n_layers=args.n_layers,\n                     n_heads=args.n_heads,\n                     d_model=args.d_model,\n                     shared_embedding=True,\n                     d_ff=args.d_ff,\n                     dropout=args.dropout,\n                     head_dropout=args.head_dropout,\n                     act='relu',\n                     head_type='classification',\n                     res_attention=False\n                     )\n\n\n    # model = gpt4ts(max_seq_len=args.seq_len, num_classes=args.num_classes, var_len=args.input_size, patch_size=args.patch_size, stride=args.stride)\n    model = model.to(device)\n\n    # model, classifier = build_model(args)\n    # model, classifier = model.to(device), classifier.to(device)\n    loss = build_loss(args).to(device)\n\n    model_init_state = model.state_dict()\n    # classifier_init_state = classifier.state_dict()\n\n    if args.optimizer == 'adam':\n        optimizer = torch.optim.Adam([{'params': model.parameters()}],\n                                     lr=args.lr, weight_decay=args.weight_decay)\n    elif args.optimizer == 'sgd':\n        optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)\n\n    train_datasets, train_targets, val_datasets, val_targets, test_datasets, test_targets = get_all_datasets(\n        sum_dataset, sum_target)\n\n    losses = []\n    test_accuracies = []\n    train_time = 0.0\n    end_val_epochs = []\n\n    for i, train_dataset in enumerate(train_datasets):\n        t = time.time()\n        
model.load_state_dict(model_init_state)\n        # classifier.load_state_dict(classifier_init_state)\n        print('{} fold start training and evaluate'.format(i))\n\n        train_target = train_targets[i]\n        val_dataset = val_datasets[i]\n        val_target = val_targets[i]\n\n        test_dataset = test_datasets[i]\n        test_target = test_targets[i]\n\n        train_dataset, val_dataset, test_dataset = fill_nan_value(train_dataset, val_dataset, test_dataset)\n\n        if args.normalize_way == 'single':\n            # TODO normalize per series\n            train_dataset = normalize_uea_set(train_dataset)\n            val_dataset = normalize_uea_set(val_dataset)\n            test_dataset = normalize_uea_set(test_dataset)\n        # else:\n        #     train_dataset, val_dataset, test_dataset = normalize_train_val_test(train_dataset, val_dataset,\n        #                                                                         test_dataset)\n\n        train_set = UEADataset(torch.from_numpy(train_dataset).type(torch.FloatTensor).to(device),\n                               torch.from_numpy(train_target).type(torch.FloatTensor).to(device).to(torch.int64))\n        val_set = UEADataset(torch.from_numpy(val_dataset).type(torch.FloatTensor).to(device),\n                             torch.from_numpy(val_target).type(torch.FloatTensor).to(device).to(torch.int64))\n        test_set = UEADataset(torch.from_numpy(test_dataset).type(torch.FloatTensor).to(device),\n                              torch.from_numpy(test_target).type(torch.FloatTensor).to(device).to(torch.int64))\n\n        train_loader = DataLoader(train_set, batch_size=args.batch_size, num_workers=0, drop_last=True)\n        val_loader = DataLoader(val_set, batch_size=args.batch_size, num_workers=0)\n        test_loader = DataLoader(test_set, batch_size=args.batch_size, num_workers=0)\n\n        train_loss = []\n        train_accuracy = []\n        num_steps = args.epoch // args.batch_size\n\n       
 last_loss = float('inf')\n        stop_count = 0\n        increase_count = 0\n\n        num_steps = train_set.__len__() // args.batch_size\n\n        min_val_loss = float('inf')\n        test_accuracy = 0\n        end_val_epoch = 0\n\n        for epoch in range(args.epoch):\n\n            if stop_count == 80 or increase_count == 80:\n                print('model convergent at epoch {}, early stopping'.format(epoch))\n                break\n\n            epoch_train_loss = 0\n            epoch_train_acc = 0\n            num_iterations = 0\n\n            model.train()\n            train_embed = []\n\n            for x, y in train_loader:\n                optimizer.zero_grad()\n                # print(\"raw x.shape = \", x.shape)\n                xb, num_patch = create_patch(xb=x.permute(0,2,1), patch_len=args.patch_len, stride=args.stride)\n                # print(\"patch xb.shape = \", xb.shape)\n\n                pred = model(xb)\n                step_loss = loss(pred, y)\n\n                # step_loss.backward(retain_graph=True)\n                step_loss.backward()\n                optimizer.step()\n\n                epoch_train_loss += step_loss.item()\n                epoch_train_acc += torch.sum(torch.argmax(pred.data, axis=1) == y) / len(y)\n\n                num_iterations += 1\n\n            epoch_train_loss /= num_steps\n            epoch_train_acc /= num_steps\n            # train_embed = np.concatenate(train_embed)\n\n            model.eval()\n\n            val_loss, val_accu = evaluate_gpt4ts(args, val_loader, model, loss)\n            if min_val_loss > val_loss:\n                min_val_loss = val_loss\n                end_val_epoch = epoch\n                test_loss, test_accuracy = evaluate_gpt4ts(args, test_loader, model, loss)\n\n            if abs(last_loss - val_loss) <= 1e-4:\n                stop_count += 1\n            else:\n                stop_count = 0\n\n            if val_loss > last_loss:\n                increase_count += 1\n          
  else:\n                increase_count = 0\n\n            last_loss = val_loss\n\n            if epoch % 50 == 0:\n                print(\n                    \"epoch : {}, train loss: {} , train accuracy : {}, \\ntest_accuracy : {}\".format(\n                        epoch, epoch_train_loss, epoch_train_acc, test_accuracy))\n\n        test_accuracies.append(test_accuracy)\n        end_val_epochs.append(end_val_epoch)\n        t = time.time() - t\n        train_time += t\n\n        print('{} fold finish training'.format(i))\n\n    test_accuracies = torch.Tensor(test_accuracies)\n\n    print(\"Training end: mean_test_acc = \", round(torch.mean(test_accuracies).item(), 4),\n          \"traning time (seconds) = \",\n          round(train_time, 4), \", seed = \", args.random_seed)\n\n    test_accuracies = test_accuracies.cpu().numpy()\n\n    save_cls_new_result(args, np.mean(test_accuracies), np.max(test_accuracies), np.min(test_accuracies),\n                        np.std(test_accuracies), train_time)\n\n    print('Done!')\n"
  },
  {
    "path": "ts_classification_methods/patchtst/main_patchtst_ucr.py",
    "content": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\n\nimport argparse\nimport time\n\nimport numpy as np\nimport torch\n\nfrom torch.utils.data import DataLoader\n\nfrom gpt4ts.gpt4ts_utils import load_UEA, normalize_uea_set, UEADataset, save_cls_new_result, set_seed, fill_nan_value, get_all_datasets, build_loss, build_dataset\n\nfrom gpt4ts.models.gpt4ts import gpt4ts\n\nfrom patchtst.models.patchTST import PatchTST\nfrom patchtst.patch_mask import PatchCB\n\n\ndef create_patch(xb, patch_len, stride):\n    \"\"\"\n    xb: [bs x seq_len x n_vars]\n    \"\"\"\n    seq_len = xb.shape[1]\n    num_patch = (max(seq_len, patch_len) - patch_len) // stride + 1\n    tgt_len = patch_len + stride * (num_patch - 1)\n    s_begin = seq_len - tgt_len\n\n    xb = xb[:, s_begin:, :]  # xb: [bs x tgt_len x nvars]\n    xb = xb.unfold(dimension=1, size=patch_len, step=stride)  # xb: [bs x num_patch x n_vars x patch_len]\n    return xb, num_patch\n\n\ndef evaluate_gpt4ts(args, val_loader, model, loss):\n    val_loss = 0\n    val_accu = 0\n\n    sum_len = 0\n    for data, target in val_loader:\n        '''\n        data, target = data.to(device), target.to(device)\n        target = target.to(torch.int64)\n        '''\n        with torch.no_grad():\n            xb, num_patch = create_patch(xb=data.permute(0, 2, 1), patch_len=args.patch_len, stride=args.stride)\n            val_pred = model(xb)\n            val_loss += loss(val_pred, target).item()\n            val_accu += torch.sum(torch.argmax(val_pred.data, axis=1) == target)\n            sum_len += len(target)\n\n    return val_loss / sum_len, val_accu / sum_len\n\n\nif __name__ == '__main__':  ##\n    parser = argparse.ArgumentParser()\n\n    # Base setup\n    parser.add_argument('--backbone', type=str, default='fcn', help='encoder backbone, fcn')\n    parser.add_argument('--random_seed', type=int, default=42, help='shuffle 
seed')\n\n    # Dataset setup  UCR, PatchTST:  ['Beef', 'Ham']\n    parser.add_argument('--dataset', type=str, default='Ham',\n                        help='dataset(in ucr)')  # LSST Heartbeat Images   # Trace, TwoPatterns, UWaveGestureLibraryAll\n    # parser.add_argument('--dataroot', type=str, default='../UCRArchive_2018', help='path of UCR folder')\n    # parser.add_argument('--dataroot', type=str, default='/dev_data/lz/time_series_pretrain/datasets/UCRArchive_2018',\n    #                     help='path of UCR folder')\n    # parser.add_argument('--dataroot', type=str, default='/SSD/lz/UCRArchive_2018', help='path of UCR folder')\n    # parser.add_argument('--dataroot', type=str, default='/SSD/lz/Multivariate2018_arff', help='path of UEA folder')\n    # parser.add_argument('--dataroot', type=str, default='/SSD/lz/UCRArchive_2018',\n    #                     help='path of UCR folder')\n    parser.add_argument('--dataroot', type=str, default='/SSD/lz/UCRArchive_2018',\n                        help='path of UCR folder')\n    parser.add_argument('--num_classes', type=int, default=0, help='number of class')\n    parser.add_argument('--normalize_way', type=str, default='single', help='single or train_set')\n    parser.add_argument('--seq_len', type=int, default=46, help='seq_len')\n    parser.add_argument('--input_size', type=int, default=1, help='input_size')\n\n    # parser.add_argument('--patch_size', type=int, default=8, help='patch_size')\n    # parser.add_argument('--stride', type=int, default=8, help='stride')\n\n    parser.add_argument('--target_points', type=int, default=96, help='forecast horizon')\n\n    # Patch\n    parser.add_argument('--patch_len', type=int, default=8, help='patch length')\n    parser.add_argument('--stride', type=int, default=8, help='stride between patch')\n\n    # RevIN\n    parser.add_argument('--revin', type=int, default=1, help='reversible instance normalization')\n    # Model args\n    parser.add_argument('--n_layers', type=int, 
default=3, help='number of Transformer layers')\n    parser.add_argument('--n_heads', type=int, default=16, help='number of Transformer heads')\n    parser.add_argument('--d_model', type=int, default=128, help='Transformer d_model')\n    parser.add_argument('--d_ff', type=int, default=256, help='Tranformer MLP dimension')\n    parser.add_argument('--dropout', type=float, default=0.2, help='Transformer dropout')\n    parser.add_argument('--head_dropout', type=float, default=0, help='head dropout')\n\n    # Semi training\n    parser.add_argument('--labeled_ratio', type=float, default='0.1', help='0.1, 0.2, 0.4')\n\n    # training setup\n    parser.add_argument('--loss', type=str, default='cross_entropy', help='loss function')\n    parser.add_argument('--optimizer', type=str, default='adam', help='optimizer')\n    parser.add_argument('--lr', type=float, default=0.001, help='learning rate')\n    parser.add_argument('--weight_decay', type=float, default=0.0, help='weight decay')\n    parser.add_argument('--batch_size', type=int, default=8, help='')\n    parser.add_argument('--epoch', type=int, default=100, help='training epoch')\n    parser.add_argument('--cuda', type=str, default='cuda:0')\n\n    # parser.add_argument('--save_dir', type=str, default='/SSD/lz/time_series_label_noise/result')\n    parser.add_argument('--save_dir', type=str, default='/SSD/lz/time_series_label_noise/result')\n    parser.add_argument('--save_csv_name', type=str, default='patchtst_supervised_240731_')\n\n    # classifier setup\n    parser.add_argument('--classifier', type=str, default='linear', help='type of classifier(linear or nonlinear)')\n    parser.add_argument('--classifier_input', type=int, default=128, help='input dim of the classifiers')\n\n    args = parser.parse_args()\n\n    device = torch.device(args.cuda if torch.cuda.is_available() else \"cpu\")\n    set_seed(args)\n\n    # sum_dataset, sum_target, num_classes = build_dataset(args)\n    # sum_dataset, sum_target, num_classes = 
load_UEA(args.dataroot, args.dataset)\n    # args.num_classes = num_classes\n    # args.seq_len = sum_dataset.shape[1]\n    sum_dataset, sum_target, num_classes = build_dataset(args)\n    # args.num_classes = num_classes\n    # x_train_labeled = x_train_few[:, np.newaxis, :]\n    # x_val_labeled = val_dataset[:, np.newaxis, :]\n    # x_test_labeled = test_dataset[:, np.newaxis, :]\n\n    sum_dataset = sum_dataset[:, :, np.newaxis]\n\n    args.num_classes = num_classes\n    args.seq_len = sum_dataset.shape[1]\n    args.input_size = sum_dataset.shape[2]\n\n    # get number of patches\n    num_patch = (max(args.seq_len, args.patch_len) - args.patch_len) // args.stride + 1\n    print('number of patches:', num_patch)\n\n    while sum_dataset.shape[0] * 0.6 < args.batch_size:\n        args.batch_size = args.batch_size // 2\n\n    print(\"args.batch_size = \", args.batch_size, \", sum_dataset.shape = \", sum_dataset.shape)\n\n    # get model\n    model = PatchTST(c_in=args.input_size,\n                     target_dim=args.target_points,\n                     patch_len=args.patch_len,\n                     stride=args.stride,\n                     num_patch=num_patch,\n                     n_layers=args.n_layers,\n                     n_heads=args.n_heads,\n                     d_model=args.d_model,\n                     shared_embedding=True,\n                     d_ff=args.d_ff,\n                     dropout=args.dropout,\n                     head_dropout=args.head_dropout,\n                     act='relu',\n                     head_type='classification',\n                     res_attention=False\n                     )\n\n\n    # model = gpt4ts(max_seq_len=args.seq_len, num_classes=args.num_classes, var_len=args.input_size, patch_size=args.patch_size, stride=args.stride)\n    model = model.to(device)\n\n    # model, classifier = build_model(args)\n    # model, classifier = model.to(device), classifier.to(device)\n    loss = build_loss(args).to(device)\n\n    
model_init_state = model.state_dict()\n    # classifier_init_state = classifier.state_dict()\n\n    if args.optimizer == 'adam':\n        optimizer = torch.optim.Adam([{'params': model.parameters()}],\n                                     lr=args.lr, weight_decay=args.weight_decay)\n    elif args.optimizer == 'sgd':\n        optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)\n\n    train_datasets, train_targets, val_datasets, val_targets, test_datasets, test_targets = get_all_datasets(\n        sum_dataset, sum_target)\n\n    losses = []\n    test_accuracies = []\n    train_time = 0.0\n    end_val_epochs = []\n\n    for i, train_dataset in enumerate(train_datasets):\n        t = time.time()\n        model.load_state_dict(model_init_state)\n        # classifier.load_state_dict(classifier_init_state)\n        print('{} fold start training and evaluate'.format(i))\n\n        train_target = train_targets[i]\n        val_dataset = val_datasets[i]\n        val_target = val_targets[i]\n\n        test_dataset = test_datasets[i]\n        test_target = test_targets[i]\n\n        train_dataset, val_dataset, test_dataset = fill_nan_value(train_dataset, val_dataset, test_dataset)\n\n        if args.normalize_way == 'single':\n            # TODO normalize per series\n            train_dataset = normalize_uea_set(train_dataset)\n            val_dataset = normalize_uea_set(val_dataset)\n            test_dataset = normalize_uea_set(test_dataset)\n        # else:\n        #     train_dataset, val_dataset, test_dataset = normalize_train_val_test(train_dataset, val_dataset,\n        #                                                                         test_dataset)\n\n        train_set = UEADataset(torch.from_numpy(train_dataset).type(torch.FloatTensor).to(device),\n                               torch.from_numpy(train_target).type(torch.FloatTensor).to(device).to(torch.int64))\n        val_set = 
UEADataset(torch.from_numpy(val_dataset).type(torch.FloatTensor).to(device),\n                             torch.from_numpy(val_target).type(torch.FloatTensor).to(device).to(torch.int64))\n        test_set = UEADataset(torch.from_numpy(test_dataset).type(torch.FloatTensor).to(device),\n                              torch.from_numpy(test_target).type(torch.FloatTensor).to(device).to(torch.int64))\n\n        train_loader = DataLoader(train_set, batch_size=args.batch_size, num_workers=0, drop_last=True)\n        val_loader = DataLoader(val_set, batch_size=args.batch_size, num_workers=0)\n        test_loader = DataLoader(test_set, batch_size=args.batch_size, num_workers=0)\n\n        train_loss = []\n        train_accuracy = []\n        num_steps = args.epoch // args.batch_size\n\n        last_loss = float('inf')\n        stop_count = 0\n        increase_count = 0\n\n        num_steps = train_set.__len__() // args.batch_size\n\n        min_val_loss = float('inf')\n        test_accuracy = 0\n        end_val_epoch = 0\n\n        for epoch in range(args.epoch):\n\n            if stop_count == 50 or increase_count == 50:\n                print('model convergent at epoch {}, early stopping'.format(epoch))\n                break\n\n            epoch_train_loss = 0\n            epoch_train_acc = 0\n            num_iterations = 0\n\n            model.train()\n            train_embed = []\n\n            for x, y in train_loader:\n                optimizer.zero_grad()\n                # print(\"raw x.shape = \", x.shape)\n                xb, num_patch = create_patch(xb=x.permute(0,2,1), patch_len=args.patch_len, stride=args.stride)\n                # print(\"patch xb.shape = \", xb.shape)\n\n                pred = model(xb)\n                step_loss = loss(pred, y)\n\n                # step_loss.backward(retain_graph=True)\n                step_loss.backward()\n                optimizer.step()\n\n                epoch_train_loss += step_loss.item()\n                
epoch_train_acc += torch.sum(torch.argmax(pred.data, axis=1) == y) / len(y)\n\n                num_iterations += 1\n\n            epoch_train_loss /= num_steps\n            epoch_train_acc /= num_steps\n            # train_embed = np.concatenate(train_embed)\n\n            model.eval()\n\n            val_loss, val_accu = evaluate_gpt4ts(args, val_loader, model, loss)\n            if min_val_loss > val_loss:\n                min_val_loss = val_loss\n                end_val_epoch = epoch\n                test_loss, test_accuracy = evaluate_gpt4ts(args, test_loader, model, loss)\n\n            if abs(last_loss - val_loss) <= 1e-4:\n                stop_count += 1\n            else:\n                stop_count = 0\n\n            if val_loss > last_loss:\n                increase_count += 1\n            else:\n                increase_count = 0\n\n            last_loss = val_loss\n\n            if epoch % 50 == 0:\n                print(\n                    \"epoch : {}, train loss: {} , train accuracy : {}, \\ntest_accuracy : {}\".format(\n                        epoch, epoch_train_loss, epoch_train_acc, test_accuracy))\n\n        test_accuracies.append(test_accuracy)\n        end_val_epochs.append(end_val_epoch)\n        t = time.time() - t\n        train_time += t\n\n        print('{} fold finish training'.format(i))\n\n    test_accuracies = torch.Tensor(test_accuracies)\n\n    print(\"Training end: mean_test_acc = \", round(torch.mean(test_accuracies).item(), 4),\n          \"traning time (seconds) = \",\n          round(train_time, 4), \", seed = \", args.random_seed)\n\n    test_accuracies = test_accuracies.cpu().numpy()\n\n    save_cls_new_result(args, np.mean(test_accuracies), np.max(test_accuracies), np.min(test_accuracies),\n                        np.std(test_accuracies), train_time)\n\n    print('Done!')\n"
  },
  {
    "path": "ts_classification_methods/patchtst/mian_patchtst.py",
    "content": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\n\nimport argparse\nimport time\n\nimport numpy as np\nimport torch\n\nfrom torch.utils.data import DataLoader\n\nfrom gpt4ts.gpt4ts_utils import load_UEA, normalize_uea_set, UEADataset, save_cls_new_result, set_seed, fill_nan_value, get_all_datasets, build_loss\n\nfrom gpt4ts.models.gpt4ts import gpt4ts\n\nfrom patchtst.models.patchTST import PatchTST\nfrom patchtst.patch_mask import PatchCB\n\n\ndef create_patch(xb, patch_len, stride):\n    \"\"\"\n    xb: [bs x seq_len x n_vars]\n    \"\"\"\n    seq_len = xb.shape[1]\n    num_patch = (max(seq_len, patch_len) - patch_len) // stride + 1\n    tgt_len = patch_len + stride * (num_patch - 1)\n    s_begin = seq_len - tgt_len\n\n    xb = xb[:, s_begin:, :]  # xb: [bs x tgt_len x nvars]\n    xb = xb.unfold(dimension=1, size=patch_len, step=stride)  # xb: [bs x num_patch x n_vars x patch_len]\n    return xb, num_patch\n\n\ndef evaluate_gpt4ts(args, val_loader, model, loss):\n    val_loss = 0\n    val_accu = 0\n\n    sum_len = 0\n    for data, target in val_loader:\n        '''\n        data, target = data.to(device), target.to(device)\n        target = target.to(torch.int64)\n        '''\n        with torch.no_grad():\n            xb, num_patch = create_patch(xb=data.permute(0, 2, 1), patch_len=args.patch_len, stride=args.stride)\n            val_pred = model(xb)\n            val_loss += loss(val_pred, target).item()\n            val_accu += torch.sum(torch.argmax(val_pred.data, axis=1) == target)\n            sum_len += len(target)\n\n    return val_loss / sum_len, val_accu / sum_len\n\n\nif __name__ == '__main__':  ##\n    parser = argparse.ArgumentParser()\n\n    # Base setup\n    parser.add_argument('--backbone', type=str, default='fcn', help='encoder backbone, fcn')\n    parser.add_argument('--random_seed', type=int, default=42, help='shuffle seed')\n\n    # 
Dataset setup  UEA, PatchTST:  ['DuckDuckGeese', 'EigenWorms', 'MotorImagery', 'PEMS-SF', 'StandWalkJump']\n    parser.add_argument('--dataset', type=str, default='EigenWorms',\n                        help='dataset(in ucr)')  # LSST Heartbeat Images\n    # parser.add_argument('--dataroot', type=str, default='../UCRArchive_2018', help='path of UCR folder')\n    # parser.add_argument('--dataroot', type=str, default='/dev_data/lz/time_series_pretrain/datasets/UCRArchive_2018',\n    #                     help='path of UCR folder')\n    # parser.add_argument('--dataroot', type=str, default='/SSD/lz/UCRArchive_2018', help='path of UCR folder')\n    parser.add_argument('--dataroot', type=str, default='/SSD/lz/Multivariate2018_arff', help='path of UEA folder')\n    parser.add_argument('--num_classes', type=int, default=0, help='number of class')\n    parser.add_argument('--normalize_way', type=str, default='single', help='single or train_set')\n    parser.add_argument('--seq_len', type=int, default=46, help='seq_len')\n    parser.add_argument('--input_size', type=int, default=1, help='input_size')\n\n    # parser.add_argument('--patch_size', type=int, default=8, help='patch_size')\n    # parser.add_argument('--stride', type=int, default=8, help='stride')\n\n    parser.add_argument('--target_points', type=int, default=96, help='forecast horizon')\n\n    # Patch\n    parser.add_argument('--patch_len', type=int, default=8, help='patch length')\n    parser.add_argument('--stride', type=int, default=8, help='stride between patch')\n\n    # RevIN\n    parser.add_argument('--revin', type=int, default=1, help='reversible instance normalization')\n    # Model args\n    parser.add_argument('--n_layers', type=int, default=3, help='number of Transformer layers')\n    parser.add_argument('--n_heads', type=int, default=16, help='number of Transformer heads')\n    parser.add_argument('--d_model', type=int, default=128, help='Transformer d_model')\n    parser.add_argument('--d_ff', 
type=int, default=256, help='Tranformer MLP dimension')\n    parser.add_argument('--dropout', type=float, default=0.2, help='Transformer dropout')\n    parser.add_argument('--head_dropout', type=float, default=0, help='head dropout')\n\n    # Semi training\n    parser.add_argument('--labeled_ratio', type=float, default='0.1', help='0.1, 0.2, 0.4')\n\n    # training setup\n    parser.add_argument('--loss', type=str, default='cross_entropy', help='loss function')\n    parser.add_argument('--optimizer', type=str, default='adam', help='optimizer')\n    parser.add_argument('--lr', type=float, default=0.001, help='learning rate')\n    parser.add_argument('--weight_decay', type=float, default=0.0, help='weight decay')\n    parser.add_argument('--batch_size', type=int, default=1, help='')\n    parser.add_argument('--epoch', type=int, default=100, help='training epoch')\n    parser.add_argument('--cuda', type=str, default='cuda:0')\n\n    parser.add_argument('--save_dir', type=str, default='/SSD/lz/time_series_label_noise/result')\n    parser.add_argument('--save_csv_name', type=str, default='patchtst_uea_supervised_240731_')\n\n    # classifier setup\n    parser.add_argument('--classifier', type=str, default='linear', help='type of classifier(linear or nonlinear)')\n    parser.add_argument('--classifier_input', type=int, default=128, help='input dim of the classifiers')\n\n    args = parser.parse_args()\n\n    device = torch.device(args.cuda if torch.cuda.is_available() else \"cpu\")\n    set_seed(args)\n\n    # sum_dataset, sum_target, num_classes = build_dataset(args)\n    sum_dataset, sum_target, num_classes = load_UEA(args.dataroot, args.dataset)\n    # args.num_classes = num_classes\n    # args.seq_len = sum_dataset.shape[1]\n\n    args.num_classes = num_classes\n    args.seq_len = sum_dataset.shape[1]\n    args.input_size = sum_dataset.shape[2]\n\n    # get number of patches\n    num_patch = (max(args.seq_len, args.patch_len) - args.patch_len) // args.stride + 1\n    
print('number of patches:', num_patch)\n\n    while sum_dataset.shape[0] * 0.6 < args.batch_size:\n        args.batch_size = args.batch_size // 2\n\n    print(\"args.batch_size = \", args.batch_size, \", sum_dataset.shape = \", sum_dataset.shape)\n\n    # get model\n    model = PatchTST(c_in=args.input_size,\n                     target_dim=args.target_points,\n                     patch_len=args.patch_len,\n                     stride=args.stride,\n                     num_patch=num_patch,\n                     n_layers=args.n_layers,\n                     n_heads=args.n_heads,\n                     d_model=args.d_model,\n                     shared_embedding=True,\n                     d_ff=args.d_ff,\n                     dropout=args.dropout,\n                     head_dropout=args.head_dropout,\n                     act='relu',\n                     head_type='classification',\n                     res_attention=False\n                     )\n\n\n    # model = gpt4ts(max_seq_len=args.seq_len, num_classes=args.num_classes, var_len=args.input_size, patch_size=args.patch_size, stride=args.stride)\n    model = model.to(device)\n\n    # model, classifier = build_model(args)\n    # model, classifier = model.to(device), classifier.to(device)\n    loss = build_loss(args).to(device)\n\n    model_init_state = model.state_dict()\n    # classifier_init_state = classifier.state_dict()\n\n    if args.optimizer == 'adam':\n        optimizer = torch.optim.Adam([{'params': model.parameters()}],\n                                     lr=args.lr, weight_decay=args.weight_decay)\n    elif args.optimizer == 'sgd':\n        optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)\n\n    train_datasets, train_targets, val_datasets, val_targets, test_datasets, test_targets = get_all_datasets(\n        sum_dataset, sum_target)\n\n    losses = []\n    test_accuracies = []\n    train_time = 0.0\n    end_val_epochs = []\n\n    for i, train_dataset in 
enumerate(train_datasets):\n        t = time.time()\n        model.load_state_dict(model_init_state)\n        # classifier.load_state_dict(classifier_init_state)\n        print('{} fold start training and evaluate'.format(i))\n\n        train_target = train_targets[i]\n        val_dataset = val_datasets[i]\n        val_target = val_targets[i]\n\n        test_dataset = test_datasets[i]\n        test_target = test_targets[i]\n\n        train_dataset, val_dataset, test_dataset = fill_nan_value(train_dataset, val_dataset, test_dataset)\n\n        if args.normalize_way == 'single':\n            # TODO normalize per series\n            train_dataset = normalize_uea_set(train_dataset)\n            val_dataset = normalize_uea_set(val_dataset)\n            test_dataset = normalize_uea_set(test_dataset)\n        # else:\n        #     train_dataset, val_dataset, test_dataset = normalize_train_val_test(train_dataset, val_dataset,\n        #                                                                         test_dataset)\n\n        train_set = UEADataset(torch.from_numpy(train_dataset).type(torch.FloatTensor).to(device),\n                               torch.from_numpy(train_target).type(torch.FloatTensor).to(device).to(torch.int64))\n        val_set = UEADataset(torch.from_numpy(val_dataset).type(torch.FloatTensor).to(device),\n                             torch.from_numpy(val_target).type(torch.FloatTensor).to(device).to(torch.int64))\n        test_set = UEADataset(torch.from_numpy(test_dataset).type(torch.FloatTensor).to(device),\n                              torch.from_numpy(test_target).type(torch.FloatTensor).to(device).to(torch.int64))\n\n        train_loader = DataLoader(train_set, batch_size=args.batch_size, num_workers=0, drop_last=True)\n        val_loader = DataLoader(val_set, batch_size=args.batch_size, num_workers=0)\n        test_loader = DataLoader(test_set, batch_size=args.batch_size, num_workers=0)\n\n        train_loss = []\n        train_accuracy = 
[]\n        num_steps = args.epoch // args.batch_size\n\n        last_loss = float('inf')\n        stop_count = 0\n        increase_count = 0\n\n        num_steps = train_set.__len__() // args.batch_size\n\n        min_val_loss = float('inf')\n        test_accuracy = 0\n        end_val_epoch = 0\n\n        for epoch in range(args.epoch):\n\n            if stop_count == 50 or increase_count == 50:\n                print('model convergent at epoch {}, early stopping'.format(epoch))\n                break\n\n            epoch_train_loss = 0\n            epoch_train_acc = 0\n            num_iterations = 0\n\n            model.train()\n            train_embed = []\n\n            for x, y in train_loader:\n                optimizer.zero_grad()\n                # print(\"raw x.shape = \", x.shape)\n                xb, num_patch = create_patch(xb=x.permute(0,2,1), patch_len=args.patch_len, stride=args.stride)\n                # print(\"patch xb.shape = \", xb.shape)\n\n                pred = model(xb)\n                step_loss = loss(pred, y)\n\n                # step_loss.backward(retain_graph=True)\n                step_loss.backward()\n                optimizer.step()\n\n                epoch_train_loss += step_loss.item()\n                epoch_train_acc += torch.sum(torch.argmax(pred.data, axis=1) == y) / len(y)\n\n                num_iterations += 1\n\n            epoch_train_loss /= num_steps\n            epoch_train_acc /= num_steps\n            # train_embed = np.concatenate(train_embed)\n\n            model.eval()\n\n            val_loss, val_accu = evaluate_gpt4ts(args, val_loader, model, loss)\n            if min_val_loss > val_loss:\n                min_val_loss = val_loss\n                end_val_epoch = epoch\n                test_loss, test_accuracy = evaluate_gpt4ts(args, test_loader, model, loss)\n\n            if abs(last_loss - val_loss) <= 1e-4:\n                stop_count += 1\n            else:\n                stop_count = 0\n\n            if 
val_loss > last_loss:\n                increase_count += 1\n            else:\n                increase_count = 0\n\n            last_loss = val_loss\n\n            if epoch % 50 == 0:\n                print(\n                    \"epoch : {}, train loss: {} , train accuracy : {}, \\ntest_accuracy : {}\".format(\n                        epoch, epoch_train_loss, epoch_train_acc, test_accuracy))\n\n        test_accuracies.append(test_accuracy)\n        end_val_epochs.append(end_val_epoch)\n        t = time.time() - t\n        train_time += t\n\n        print('{} fold finish training'.format(i))\n\n    test_accuracies = torch.Tensor(test_accuracies)\n\n    print(\"Training end: mean_test_acc = \", round(torch.mean(test_accuracies).item(), 4),\n          \"traning time (seconds) = \",\n          round(train_time, 4), \", seed = \", args.random_seed)\n\n    test_accuracies = test_accuracies.cpu().numpy()\n\n    save_cls_new_result(args, np.mean(test_accuracies), np.max(test_accuracies), np.min(test_accuracies),\n                        np.std(test_accuracies), train_time)\n\n    print('Done!')\n"
  },
  {
    "path": "ts_classification_methods/patchtst/models/__init__.py",
    "content": ""
  },
  {
    "path": "ts_classification_methods/patchtst/models/attention.py",
    "content": "import torch\nfrom torch import nn\nfrom torch import Tensor\nimport torch.nn.functional as F\nfrom typing import Optional\n\n\nclass MultiheadAttention(nn.Module):\n    def __init__(self, d_model, n_heads, d_k=None, d_v=None, res_attention=False, attn_dropout=0., proj_dropout=0.,\n                 qkv_bias=True, lsa=False):\n        \"\"\"Multi Head Attention Layer\n        Input shape:\n            Q:       [batch_size (bs) x max_q_len x d_model]\n            K, V:    [batch_size (bs) x q_len x d_model]\n            mask:    [q_len x q_len]\n        \"\"\"\n        super().__init__()\n        d_k = d_model // n_heads if d_k is None else d_k\n        d_v = d_model // n_heads if d_v is None else d_v\n\n        self.n_heads, self.d_k, self.d_v = n_heads, d_k, d_v\n\n        self.W_Q = nn.Linear(d_model, d_k * n_heads, bias=qkv_bias)\n        self.W_K = nn.Linear(d_model, d_k * n_heads, bias=qkv_bias)\n        self.W_V = nn.Linear(d_model, d_v * n_heads, bias=qkv_bias)\n\n        # Scaled Dot-Product Attention (multiple heads)\n        self.res_attention = res_attention\n        self.sdp_attn = ScaledDotProductAttention(d_model, n_heads, attn_dropout=attn_dropout,\n                                                  res_attention=self.res_attention, lsa=lsa)\n\n        # Poject output\n        self.to_out = nn.Sequential(nn.Linear(n_heads * d_v, d_model), nn.Dropout(proj_dropout))\n\n    def forward(self, Q: Tensor, K: Optional[Tensor] = None, V: Optional[Tensor] = None, prev: Optional[Tensor] = None,\n                key_padding_mask: Optional[Tensor] = None, attn_mask: Optional[Tensor] = None):\n\n        bs = Q.size(0)\n        if K is None: K = Q\n        if V is None: V = Q\n\n        # Linear (+ split in multiple heads)\n        q_s = self.W_Q(Q).view(bs, -1, self.n_heads, self.d_k).transpose(1,\n                                                                         2)  # q_s    : [bs x n_heads x max_q_len x d_k]\n        k_s = 
self.W_K(K).view(bs, -1, self.n_heads, self.d_k).permute(0, 2, 3,\n                                                                       1)  # k_s    : [bs x n_heads x d_k x q_len] - transpose(1,2) + transpose(2,3)\n        v_s = self.W_V(V).view(bs, -1, self.n_heads, self.d_v).transpose(1, 2)  # v_s    : [bs x n_heads x q_len x d_v]\n\n        # Apply Scaled Dot-Product Attention (multiple heads)\n        if self.res_attention:\n            output, attn_weights, attn_scores = self.sdp_attn(q_s, k_s, v_s, prev=prev,\n                                                              key_padding_mask=key_padding_mask, attn_mask=attn_mask)\n        else:\n            output, attn_weights = self.sdp_attn(q_s, k_s, v_s, key_padding_mask=key_padding_mask, attn_mask=attn_mask)\n        # output: [bs x n_heads x q_len x d_v], attn: [bs x n_heads x q_len x q_len], scores: [bs x n_heads x max_q_len x q_len]\n\n        # back to the original inputs dimensions\n        output = output.transpose(1, 2).contiguous().view(bs, -1,\n                                                          self.n_heads * self.d_v)  # output: [bs x q_len x n_heads * d_v]\n        output = self.to_out(output)\n\n        if self.res_attention:\n            return output, attn_weights, attn_scores\n        else:\n            return output, attn_weights\n\n\nclass ScaledDotProductAttention(nn.Module):\n    r\"\"\"Scaled Dot-Product Attention module (Attention is all you need by Vaswani et al., 2017) with optional residual attention from previous layer\n    (Realformer: Transformer likes residual attention by He et al, 2020) and locality self attention (Vision Transformer for Small-Size Datasets\n    by Lee et al, 2021)\"\"\"\n\n    def __init__(self, d_model, n_heads, attn_dropout=0., res_attention=False, lsa=False):\n        super().__init__()\n        self.attn_dropout = nn.Dropout(attn_dropout)\n        self.res_attention = res_attention\n        head_dim = d_model // n_heads\n        self.scale = 
nn.Parameter(torch.tensor(head_dim ** -0.5), requires_grad=lsa)\n        self.lsa = lsa\n\n    def forward(self, q: Tensor, k: Tensor, v: Tensor, prev: Optional[Tensor] = None,\n                key_padding_mask: Optional[Tensor] = None, attn_mask: Optional[Tensor] = None):\n        '''\n        Input shape:\n            q               : [bs x n_heads x max_q_len x d_k]\n            k               : [bs x n_heads x d_k x seq_len]\n            v               : [bs x n_heads x seq_len x d_v]\n            prev            : [bs x n_heads x q_len x seq_len]\n            key_padding_mask: [bs x seq_len]\n            attn_mask       : [1 x seq_len x seq_len]\n        Output shape:\n            output:  [bs x n_heads x q_len x d_v]\n            attn   : [bs x n_heads x q_len x seq_len]\n            scores : [bs x n_heads x q_len x seq_len]\n        '''\n\n        # Scaled MatMul (q, k) - similarity scores for all pairs of positions in an input sequence\n        attn_scores = torch.matmul(q, k) * self.scale  # attn_scores : [bs x n_heads x max_q_len x q_len]\n\n        # Add pre-softmax attention scores from the previous layer (optional)\n        if prev is not None: attn_scores = attn_scores + prev\n\n        # Attention mask (optional)\n        if attn_mask is not None:  # attn_mask with shape [q_len x seq_len] - only used when q_len == seq_len\n            if attn_mask.dtype == torch.bool:\n                attn_scores.masked_fill_(attn_mask, -np.inf)\n            else:\n                attn_scores += attn_mask\n\n        # Key padding mask (optional)\n        if key_padding_mask is not None:  # mask with shape [bs x q_len] (only when max_w_len == q_len)\n            attn_scores.masked_fill_(key_padding_mask.unsqueeze(1).unsqueeze(2), -np.inf)\n\n        # normalize the attention weights\n        attn_weights = F.softmax(attn_scores, dim=-1)  # attn_weights   : [bs x n_heads x max_q_len x q_len]\n        attn_weights = self.attn_dropout(attn_weights)\n\n        # 
compute the new values given the attention weights\n        output = torch.matmul(attn_weights, v)  # output: [bs x n_heads x max_q_len x d_v]\n\n        if self.res_attention:\n            return output, attn_weights, attn_scores\n        else:\n            return output, attn_weights\n\n"
  },
  {
    "path": "ts_classification_methods/patchtst/models/basics.py",
    "content": "__all__ = ['Transpose', 'LinBnDrop', 'SigmoidRange', 'sigmoid_range', 'get_activation_fn']\n\nimport torch\nfrom torch import nn\n\n\nclass Transpose(nn.Module):\n    def __init__(self, *dims, contiguous=False):\n        super().__init__()\n        self.dims, self.contiguous = dims, contiguous\n\n    def forward(self, x):\n        if self.contiguous:\n            return x.transpose(*self.dims).contiguous()\n        else:\n            return x.transpose(*self.dims)\n\n\nclass SigmoidRange(nn.Module):\n    def __init__(self, low, high):\n        super().__init__()\n        self.low, self.high = low, high\n        # self.low, self.high = ranges\n\n    def forward(self, x):\n        # return sigmoid_range(x, self.low, self.high)\n        return torch.sigmoid(x) * (self.high - self.low) + self.low\n\n\nclass LinBnDrop(nn.Sequential):\n    \"Module grouping `BatchNorm1d`, `Dropout` and `Linear` layers\"\n\n    def __init__(self, n_in, n_out, bn=True, p=0., act=None, lin_first=False):\n        layers = [nn.BatchNorm2d(n_out if lin_first else n_in, ndim=1)] if bn else []\n        if p != 0: layers.append(nn.Dropout(p))\n        lin = [nn.Linear(n_in, n_out, bias=not bn)]\n        if act is not None: lin.append(act)\n        layers = lin + layers if lin_first else layers + lin\n        super().__init__(*layers)\n\n\ndef sigmoid_range(x, low, high):\n    \"Sigmoid function with range `(low, high)`\"\n    return torch.sigmoid(x) * (high - low) + low\n\n\ndef get_activation_fn(activation):\n    if callable(activation):\n        return activation()\n    elif activation.lower() == \"relu\":\n        return nn.ReLU()\n    elif activation.lower() == \"gelu\":\n        return nn.GELU()\n    raise ValueError(f'{activation} is not available. You can use \"relu\", \"gelu\", or a callable')\n\n\n"
  },
  {
    "path": "ts_classification_methods/patchtst/models/heads.py",
    "content": "import torch\nfrom torch import nn\n\n\nclass LinearRegressionHead(nn.Module):\n    def __init__(self, n_vars, d_model, output_dim, head_dropout, y_range=None):\n        super().__init__()\n        self.y_range = y_range\n        self.flatten = nn.Flatten(start_dim=1)\n        self.dropout = nn.Dropout(head_dropout)\n        self.linear = nn.Linear(n_vars*d_model, output_dim)\n\n    def forward(self, x):\n        \"\"\"\n        x: [bs x nvars x d_model x num_patch]\n        output: [bs x output_dim]\n        \"\"\"\n        x = x[:,:,:,-1]             # only consider the last item in the sequence, x: bs x nvars x d_model\n        x = self.flatten(x)         # x: bs x nvars * d_model\n        x = self.dropout(x)\n        y = self.linear(x)         # y: bs x output_dim\n        if self.y_range: y = SigmoidRange(*self.y_range)(y)\n        return y\n\n\nclass LinearClassificationHead(nn.Module):\n    def __init__(self, n_vars, d_model, n_classes, head_dropout):\n        super().__init__()\n        self.flatten = nn.Flatten(start_dim=1)\n        self.dropout = nn.Dropout(head_dropout)\n        self.linear = nn.Linear(n_vars*d_model, n_classes)\n\n    def forward(self, x):\n        \"\"\"\n        x: [bs x nvars x d_model x num_patch]\n        output: [bs x n_classes]\n        \"\"\"\n        x = x[:,:,:,-1]             # only consider the last item in the sequence, x: bs x nvars x d_model\n        x = self.flatten(x)         # x: bs x nvars * d_model\n        x = self.dropout(x)\n        y = self.linear(x)         # y: bs x n_classes\n        return y\n\n\nclass LinearPredictionHead(nn.Module):\n    def __init__(self, individual, n_vars, d_model, num_patch, forecast_len, head_dropout=0):\n        super().__init__()\n\n        self.individual = individual\n        self.n_vars = n_vars\n        head_dim = d_model*num_patch\n\n        if self.individual:\n            self.linears = nn.ModuleList()\n            self.dropouts = nn.ModuleList()\n            
self.flattens = nn.ModuleList()\n            for i in range(self.n_vars):\n                self.flattens.append(nn.Flatten(start_dim=-2))\n                self.linears.append(nn.Linear(head_dim, forecast_len))\n                self.dropouts.append(nn.Dropout(head_dropout))\n        else:\n            self.flatten = nn.Flatten(start_dim=-2)\n            self.linear = nn.Linear(head_dim, forecast_len)\n            self.dropout = nn.Dropout(head_dropout)\n\n\n    def forward(self, x):\n        \"\"\"\n        x: [bs x nvars x d_model x num_patch]\n        output: [bs x forecast_len x nvars]\n        \"\"\"\n        if self.individual:\n            x_out = []\n            for i in range(self.n_vars):\n                z = self.flattens[i](x[:,i,:,:])          # z: [bs x d_model * num_patch]\n                z = self.linears[i](z)                    # z: [bs x forecast_len]\n                z = self.dropouts[i](z)\n                x_out.append(z)\n            x = torch.stack(x_out, dim=1)         # x: [bs x nvars x forecast_len]\n        else:\n            x = self.flatten(x)\n            x = self.dropout(x)\n            x = self.linear(x)\n        return x.transpose(2,1)     # [bs x forecast_len x nvars]\n\n\nclass LinearPretrainHead(nn.Module):\n    def __init__(self, d_model, patch_len, dropout):\n        super().__init__()\n        self.dropout = nn.Dropout(dropout)\n        self.linear = nn.Linear(d_model, patch_len)\n\n    def forward(self, x):\n        \"\"\"\n        x: tensor [bs x nvars x d_model x num_patch]\n        output: tensor [bs x nvars x num_patch x patch_len]\n        \"\"\"\n\n        x = x.transpose(2,3)                     # [bs x nvars x num_patch x d_model]\n        x = self.linear( self.dropout(x) )      # [bs x nvars x num_patch x patch_len]\n        x = x.permute(0,2,1,3)                  # [bs x num_patch x nvars x patch_len]\n        return x\n\n"
  },
  {
    "path": "ts_classification_methods/patchtst/models/patchTST.py",
    "content": "__all__ = ['PatchTST']\n\nfrom patchtst.models.pos_encoding import *\nfrom patchtst.models.basics import *\nfrom patchtst.models.attention import *\n\n\n# Cell\nclass PatchTST(nn.Module):\n    \"\"\"\n    Output dimension:\n         [bs x target_dim x nvars] for prediction\n         [bs x target_dim] for regression\n         [bs x target_dim] for classification\n         [bs x num_patch x n_vars x patch_len] for pretrain\n    \"\"\"\n\n    def __init__(self, c_in: int, target_dim: int, patch_len: int, stride: int, num_patch: int,\n                 n_layers: int = 3, d_model=128, n_heads=16, shared_embedding=True, d_ff: int = 256,\n                 norm: str = 'BatchNorm', attn_dropout: float = 0., dropout: float = 0., act: str = \"gelu\",\n                 res_attention: bool = True, pre_norm: bool = False, store_attn: bool = False,\n                 pe: str = 'zeros', learn_pe: bool = True, head_dropout=0,\n                 head_type=\"prediction\", individual=False,\n                 y_range: Optional[tuple] = None, verbose: bool = False, **kwargs):\n\n        super().__init__()\n\n        assert head_type in ['pretrain', 'prediction', 'regression',\n                             'classification'], 'head type should be either pretrain, prediction, or regression'\n        # Backbone\n        self.backbone = PatchTSTEncoder(c_in, num_patch=num_patch, patch_len=patch_len,\n                                        n_layers=n_layers, d_model=d_model, n_heads=n_heads,\n                                        shared_embedding=shared_embedding, d_ff=d_ff,\n                                        attn_dropout=attn_dropout, dropout=dropout, act=act,\n                                        res_attention=res_attention, pre_norm=pre_norm, store_attn=store_attn,\n                                        pe=pe, learn_pe=learn_pe, verbose=verbose, **kwargs)\n\n        # Head\n        self.n_vars = c_in\n        self.head_type = head_type\n\n        if head_type == 
\"pretrain\":\n            self.head = PretrainHead(d_model, patch_len,\n                                     head_dropout)  # custom head passed as a partial func with all its kwargs\n        elif head_type == \"prediction\":\n            self.head = PredictionHead(individual, self.n_vars, d_model, num_patch, target_dim, head_dropout)\n        elif head_type == \"regression\":\n            self.head = RegressionHead(self.n_vars, d_model, target_dim, head_dropout, y_range)\n        elif head_type == \"classification\":\n            self.head = ClassificationHead(self.n_vars, d_model, target_dim, head_dropout)\n\n    def forward(self, z):\n        \"\"\"\n        z: tensor [bs x num_patch x n_vars x patch_len]\n        \"\"\"\n        # print(\"1 raw z.shape = \", z.shape)\n        z = self.backbone(z)  # z: [bs x nvars x d_model x num_patch]\n        # print(\"2 raw z.shape = \", z.shape)\n        z = self.head(z)\n        # print(\"3 raw z.shape = \", z.shape)\n        # z: [bs x target_dim x nvars] for prediction\n        #    [bs x target_dim] for regression\n        #    [bs x target_dim] for classification\n        #    [bs x num_patch x n_vars x patch_len] for pretrain\n        return z\n\n\nclass RegressionHead(nn.Module):\n    def __init__(self, n_vars, d_model, output_dim, head_dropout, y_range=None):\n        super().__init__()\n        self.y_range = y_range\n        self.flatten = nn.Flatten(start_dim=1)\n        self.dropout = nn.Dropout(head_dropout)\n        self.linear = nn.Linear(n_vars * d_model, output_dim)\n\n    def forward(self, x):\n        \"\"\"\n        x: [bs x nvars x d_model x num_patch]\n        output: [bs x output_dim]\n        \"\"\"\n        x = x[:, :, :, -1]  # only consider the last item in the sequence, x: bs x nvars x d_model\n        x = self.flatten(x)  # x: bs x nvars * d_model\n        x = self.dropout(x)\n        y = self.linear(x)  # y: bs x output_dim\n        if self.y_range: y = SigmoidRange(*self.y_range)(y)\n        
return y\n\n\nclass ClassificationHead(nn.Module):\n    def __init__(self, n_vars, d_model, n_classes, head_dropout):\n        super().__init__()\n        self.flatten = nn.Flatten(start_dim=1)\n        self.dropout = nn.Dropout(head_dropout)\n        self.linear = nn.Linear(n_vars * d_model, n_classes)\n\n    def forward(self, x):\n        \"\"\"\n        x: [bs x nvars x d_model x num_patch]\n        output: [bs x n_classes]\n        \"\"\"\n        # print(\"1 x.shape = \", x.shape)\n        x = x[:, :, :, -1]  # only consider the last item in the sequence, x: bs x nvars x d_model\n        # print(\"2 x.shape = \", x.shape)\n        x = self.flatten(x)  # x: bs x nvars * d_model\n        # print(\"3 x.shape = \", x.shape)\n        x = self.dropout(x)\n        y = self.linear(x)  # y: bs x n_classes\n        return y\n\n\nclass PredictionHead(nn.Module):\n    def __init__(self, individual, n_vars, d_model, num_patch, forecast_len, head_dropout=0, flatten=False):\n        super().__init__()\n\n        self.individual = individual\n        self.n_vars = n_vars\n        self.flatten = flatten\n        head_dim = d_model * num_patch\n\n        if self.individual:\n            self.linears = nn.ModuleList()\n            self.dropouts = nn.ModuleList()\n            self.flattens = nn.ModuleList()\n            for i in range(self.n_vars):\n                self.flattens.append(nn.Flatten(start_dim=-2))\n                self.linears.append(nn.Linear(head_dim, forecast_len))\n                self.dropouts.append(nn.Dropout(head_dropout))\n        else:\n            self.flatten = nn.Flatten(start_dim=-2)\n            self.linear = nn.Linear(head_dim, forecast_len)\n            self.dropout = nn.Dropout(head_dropout)\n\n    def forward(self, x):\n        \"\"\"\n        x: [bs x nvars x d_model x num_patch]\n        output: [bs x forecast_len x nvars]\n        \"\"\"\n        if self.individual:\n            x_out = []\n            for i in range(self.n_vars):\n             
   z = self.flattens[i](x[:, i, :, :])  # z: [bs x d_model * num_patch]\n                z = self.linears[i](z)  # z: [bs x forecast_len]\n                z = self.dropouts[i](z)\n                x_out.append(z)\n            x = torch.stack(x_out, dim=1)  # x: [bs x nvars x forecast_len]\n        else:\n            x = self.flatten(x)  # x: [bs x nvars x (d_model * num_patch)]\n            x = self.dropout(x)\n            x = self.linear(x)  # x: [bs x nvars x forecast_len]\n        return x.transpose(2, 1)  # [bs x forecast_len x nvars]\n\n\nclass PretrainHead(nn.Module):\n    def __init__(self, d_model, patch_len, dropout):\n        super().__init__()\n        self.dropout = nn.Dropout(dropout)\n        self.linear = nn.Linear(d_model, patch_len)\n\n    def forward(self, x):\n        \"\"\"\n        x: tensor [bs x nvars x d_model x num_patch]\n        output: tensor [bs x nvars x num_patch x patch_len]\n        \"\"\"\n\n        x = x.transpose(2, 3)  # [bs x nvars x num_patch x d_model]\n        x = self.linear(self.dropout(x))  # [bs x nvars x num_patch x patch_len]\n        x = x.permute(0, 2, 1, 3)  # [bs x num_patch x nvars x patch_len]\n        return x\n\n\nclass PatchTSTEncoder(nn.Module):\n    def __init__(self, c_in, num_patch, patch_len,\n                 n_layers=3, d_model=128, n_heads=16, shared_embedding=True,\n                 d_ff=256, norm='BatchNorm', attn_dropout=0., dropout=0., act=\"gelu\", store_attn=False,\n                 res_attention=True, pre_norm=False,\n                 pe='zeros', learn_pe=True, verbose=False, **kwargs):\n\n        super().__init__()\n        self.n_vars = c_in\n        self.num_patch = num_patch\n        self.patch_len = patch_len\n        self.d_model = d_model\n        self.shared_embedding = shared_embedding\n\n        # Input encoding: projection of feature vectors onto a d-dim vector space\n        if not shared_embedding:\n            self.W_P = nn.ModuleList()\n            for _ in range(self.n_vars): 
self.W_P.append(nn.Linear(patch_len, d_model))\n        else:\n            self.W_P = nn.Linear(patch_len, d_model)\n\n            # Positional encoding\n        self.W_pos = positional_encoding(pe, learn_pe, num_patch, d_model)\n\n        # Residual dropout\n        self.dropout = nn.Dropout(dropout)\n\n        # Encoder\n        self.encoder = TSTEncoder(d_model, n_heads, d_ff=d_ff, norm=norm, attn_dropout=attn_dropout, dropout=dropout,\n                                  pre_norm=pre_norm, activation=act, res_attention=res_attention, n_layers=n_layers,\n                                  store_attn=store_attn)\n\n    def forward(self, x) -> Tensor:\n        \"\"\"\n        x: tensor [bs x num_patch x nvars x patch_len]\n        \"\"\"\n        bs, num_patch, n_vars, patch_len = x.shape\n        # Input encoding\n        if not self.shared_embedding:\n            x_out = []\n            for i in range(n_vars):\n                z = self.W_P[i](x[:, :, i, :])\n                x_out.append(z)\n            x = torch.stack(x_out, dim=2)\n        else:\n            x = self.W_P(x)  # x: [bs x num_patch x nvars x d_model]\n        x = x.transpose(1, 2)  # x: [bs x nvars x num_patch x d_model]\n\n        u = torch.reshape(x, (bs * n_vars, num_patch, self.d_model))  # u: [bs * nvars x num_patch x d_model]\n        u = self.dropout(u + self.W_pos)  # u: [bs * nvars x num_patch x d_model]\n\n        # print(\"before trans u.shape = \", u.shape)\n        # Encoder\n        z = self.encoder(u)  # z: [bs * nvars x num_patch x d_model]\n        # print(\"end trans u.shape = \", z.shape)\n        z = torch.reshape(z, (-1, n_vars, num_patch, self.d_model))  # z: [bs x nvars x num_patch x d_model]\n        z = z.permute(0, 1, 3, 2)  # z: [bs x nvars x d_model x num_patch]\n\n        return z\n\n\n# Cell\nclass TSTEncoder(nn.Module):\n    def __init__(self, d_model, n_heads, d_ff=None,\n                 norm='BatchNorm', attn_dropout=0., dropout=0., activation='gelu',\n               
  res_attention=False, n_layers=1, pre_norm=False, store_attn=False):\n        super().__init__()\n\n        self.layers = nn.ModuleList([TSTEncoderLayer(d_model, n_heads=n_heads, d_ff=d_ff, norm=norm,\n                                                     attn_dropout=attn_dropout, dropout=dropout,\n                                                     activation=activation, res_attention=res_attention,\n                                                     pre_norm=pre_norm, store_attn=store_attn) for i in\n                                     range(n_layers)])\n        self.res_attention = res_attention\n\n    def forward(self, src: Tensor):\n        \"\"\"\n        src: tensor [bs x q_len x d_model]\n        \"\"\"\n        output = src\n        scores = None\n        if self.res_attention:\n            for mod in self.layers: output, scores = mod(output, prev=scores)\n            return output\n        else:\n            for mod in self.layers: output = mod(output)\n            return output\n\n\nclass TSTEncoderLayer(nn.Module):\n    def __init__(self, d_model, n_heads, d_ff=256, store_attn=False,\n                 norm='BatchNorm', attn_dropout=0, dropout=0., bias=True,\n                 activation=\"gelu\", res_attention=False, pre_norm=False):\n        super().__init__()\n        assert not d_model % n_heads, f\"d_model ({d_model}) must be divisible by n_heads ({n_heads})\"\n        d_k = d_model // n_heads\n        d_v = d_model // n_heads\n\n        # Multi-Head attention\n        self.res_attention = res_attention\n        self.self_attn = MultiheadAttention(d_model, n_heads, d_k, d_v, attn_dropout=attn_dropout, proj_dropout=dropout,\n                                            res_attention=res_attention)\n\n        # Add & Norm\n        self.dropout_attn = nn.Dropout(dropout)\n        if \"batch\" in norm.lower():\n            self.norm_attn = nn.Sequential(Transpose(1, 2), nn.BatchNorm1d(d_model), Transpose(1, 2))\n        else:\n            
self.norm_attn = nn.LayerNorm(d_model)\n\n        # Position-wise Feed-Forward\n        self.ff = nn.Sequential(nn.Linear(d_model, d_ff, bias=bias),\n                                get_activation_fn(activation),\n                                nn.Dropout(dropout),\n                                nn.Linear(d_ff, d_model, bias=bias))\n\n        # Add & Norm\n        self.dropout_ffn = nn.Dropout(dropout)\n        if \"batch\" in norm.lower():\n            self.norm_ffn = nn.Sequential(Transpose(1, 2), nn.BatchNorm1d(d_model), Transpose(1, 2))\n        else:\n            self.norm_ffn = nn.LayerNorm(d_model)\n\n        self.pre_norm = pre_norm\n        self.store_attn = store_attn\n\n    def forward(self, src: Tensor, prev: Optional[Tensor] = None):\n        \"\"\"\n        src: tensor [bs x q_len x d_model]\n        \"\"\"\n        # Multi-Head attention sublayer\n        if self.pre_norm:\n            src = self.norm_attn(src)\n        ## Multi-Head attention\n        if self.res_attention:\n            src2, attn, scores = self.self_attn(src, src, src, prev)\n        else:\n            src2, attn = self.self_attn(src, src, src)\n        if self.store_attn:\n            self.attn = attn\n        ## Add & Norm\n        src = src + self.dropout_attn(src2)  # Add: residual connection with residual dropout\n        if not self.pre_norm:\n            src = self.norm_attn(src)\n\n        # Feed-forward sublayer\n        if self.pre_norm:\n            src = self.norm_ffn(src)\n        ## Position-wise Feed-Forward\n        src2 = self.ff(src)\n        ## Add & Norm\n        src = src + self.dropout_ffn(src2)  # Add: residual connection with residual dropout\n        if not self.pre_norm:\n            src = self.norm_ffn(src)\n\n        if self.res_attention:\n            return src, scores\n        else:\n            return src\n\n\n\n"
  },
  {
    "path": "ts_classification_methods/patchtst/models/pos_encoding.py",
    "content": "__all__ = ['PositionalEncoding', 'SinCosPosEncoding', 'positional_encoding']\n\n# Cell\n\nimport torch\nfrom torch import nn\nimport math\n\n# Cell\ndef PositionalEncoding(q_len, d_model, normalize=True):\n    pe = torch.zeros(q_len, d_model)\n    position = torch.arange(0, q_len).unsqueeze(1)\n    div_term = torch.exp(torch.arange(0, d_model, 2) * -(math.log(10000.0) / d_model))\n    pe[:, 0::2] = torch.sin(position * div_term)\n    pe[:, 1::2] = torch.cos(position * div_term)\n    if normalize:\n        pe = pe - pe.mean()\n        pe = pe / (pe.std() * 10)\n    return pe\n\nSinCosPosEncoding = PositionalEncoding\n\n\ndef positional_encoding(pe, learn_pe, q_len, d_model):\n    # Positional encoding\n    if pe == None:\n        W_pos = torch.empty((q_len, d_model)) # pe = None and learn_pe = False can be used to measure impact of pe\n        nn.init.uniform_(W_pos, -0.02, 0.02)\n        learn_pe = False\n    elif pe == 'zero':\n        W_pos = torch.empty((q_len, 1))\n        nn.init.uniform_(W_pos, -0.02, 0.02)\n    elif pe == 'zeros':\n        W_pos = torch.empty((q_len, d_model))\n        nn.init.uniform_(W_pos, -0.02, 0.02)\n    elif pe == 'normal' or pe == 'gauss':\n        W_pos = torch.zeros((q_len, 1))\n        torch.nn.init.normal_(W_pos, mean=0.0, std=0.1)\n    elif pe == 'uniform':\n        W_pos = torch.zeros((q_len, 1))\n        nn.init.uniform_(W_pos, a=0.0, b=0.1)\n    elif pe == 'sincos': W_pos = PositionalEncoding(q_len, d_model, normalize=True)\n    else: raise ValueError(f\"{pe} is not a valid pe (positional encoder. Available types: 'gauss'=='normal', \\\n        'zeros', 'zero', uniform', 'sincos', None.)\")\n    return nn.Parameter(W_pos, requires_grad=learn_pe)\n\n"
  },
  {
    "path": "ts_classification_methods/patchtst/models/revin.py",
    "content": "import torch\nfrom torch import nn\n\nclass RevIN(nn.Module):\n    def __init__(self, num_features: int, eps=1e-5, affine=True):\n        \"\"\"\n        :param num_features: the number of features or channels\n        :param eps: a value added for numerical stability\n        :param affine: if True, RevIN has learnable affine parameters\n        \"\"\"\n        super(RevIN, self).__init__()\n        self.num_features = num_features\n        self.eps = eps\n        self.affine = affine\n        if self.affine:\n            self._init_params()\n\n    def forward(self, x, mode:str):\n        if mode == 'norm':\n            self._get_statistics(x)\n            x = self._normalize(x)\n        elif mode == 'denorm':\n            x = self._denormalize(x)\n        else: raise NotImplementedError\n        return x\n\n    def _init_params(self):\n        # initialize RevIN params: (C,)\n        self.affine_weight = nn.Parameter(torch.ones(self.num_features))\n        self.affine_bias = nn.Parameter(torch.zeros(self.num_features))\n\n    def _get_statistics(self, x):\n        dim2reduce = tuple(range(1, x.ndim-1))\n        self.mean = torch.mean(x, dim=dim2reduce, keepdim=True).detach()\n        self.stdev = torch.sqrt(torch.var(x, dim=dim2reduce, keepdim=True, unbiased=False) + self.eps).detach()\n\n    def _normalize(self, x):\n        x = x - self.mean\n        x = x / self.stdev\n        if self.affine:\n            x = x * self.affine_weight\n            x = x + self.affine_bias\n        return x\n\n    def _denormalize(self, x):\n        if self.affine:\n            x = x - self.affine_bias\n            x = x / (self.affine_weight + self.eps*self.eps)\n        x = x * self.stdev\n        x = x + self.mean\n        return x\n"
  },
  {
    "path": "ts_classification_methods/patchtst/patch_mask.py",
    "content": "\nfrom torch import nn\nimport torch\n\n\nDTYPE = torch.float32\n\n\nclass GetAttr:\n    \"Inherit from this to have all attr accesses in `self._xtra` passed down to `self.default`\"\n    _default = 'default'\n\n    def _component_attr_filter(self, k):\n        if k.startswith('__') or k in ('_xtra', self._default): return False\n        xtra = getattr(self, '_xtra', None)\n        return xtra is None or k in xtra\n\n    def _dir(self):\n        return [k for k in dir(getattr(self, self._default)) if self._component_attr_filter(k)]\n\n    def __getattr__(self, k):\n        if self._component_attr_filter(k):\n            attr = getattr(self, self._default, None)\n            if attr is not None: return getattr(attr, k)\n        # raise AttributeError(k)\n\n    def __dir__(self):\n        return custom_dir(self, self._dir())\n\n    #     def __getstate__(self): return self.__dict__\n    def __setstate__(self, data):\n        self.__dict__.update(data)\n\n\ndef get_device(use_cuda=True, device_id=None, usage=5):\n    \"Return or set default device; `use_cuda`: None - CUDA if available; True - error if not available; False - CPU\"\n    if not torch.cuda.is_available():\n        use_cuda = False\n    else:\n        if device_id is None:\n            device_ids = get_available_cuda(usage=usage)\n            device_id = device_ids[0]  # get the first available device\n        torch.cuda.set_device(device_id)\n    return torch.device(torch.cuda.current_device()) if use_cuda else torch.device('cpu')\n\n\ndef set_device(usage=5):\n    \"set the device that has usage < default usage  \"\n    device_ids = get_available_cuda(usage=usage)\n    torch.cuda.set_device(device_ids[0])  # get the first available device\n\n\ndef default_device(use_cuda=True):\n    \"Return or set default device; `use_cuda`: None - CUDA if available; True - error if not available; False - CPU\"\n    if not torch.cuda.is_available():\n        use_cuda = False\n    return 
torch.device(torch.cuda.current_device()) if use_cuda else torch.device('cpu')\n\n\ndef get_available_cuda(usage=10):\n    if not torch.cuda.is_available(): return\n    # collect available cuda devices, only collect devices that has less that 'usage' percent\n    device_ids = []\n    for device in range(torch.cuda.device_count()):\n        if torch.cuda.utilization(device) < usage: device_ids.append(device)\n    return device_ids\n\n\ndef to_device(b, device=None, non_blocking=False):\n    \"\"\"\n    Recursively put `b` on `device`\n    components of b are torch tensors\n    \"\"\"\n    if device is None:\n        device = default_device(use_cuda=True)\n\n    if isinstance(b, dict):\n        return {key: to_device(val, device) for key, val in b.items()}\n\n    if isinstance(b, (list, tuple)):\n        return type(b)(to_device(o, device) for o in b)\n\n    return b.to(device, non_blocking=non_blocking)\n\n\ndef to_numpy(b):\n    \"\"\"\n    Components of b are torch tensors\n    \"\"\"\n    if isinstance(b, dict):\n        return {key: to_numpy(val) for key, val in b.items()}\n\n    if isinstance(b, (list, tuple)):\n        return type(b)(to_numpy(o) for o in b)\n\n    return b.detach().cpu().numpy()\n\n\nclass Callback(GetAttr):\n    _default = 'learner'\n\n\nclass SetupLearnerCB(Callback):\n    def __init__(self):\n        self.device = default_device(use_cuda=True)\n\n    def before_batch_train(self):\n        self._to_device()\n\n    def before_batch_valid(self):\n        self._to_device()\n\n    def before_batch_predict(self):\n        self._to_device()\n\n    def before_batch_test(self):\n        self._to_device()\n\n    def _to_device(self):\n        batch = to_device(self.batch, self.device)\n        if self.n_inp > 1:\n            xb, yb = batch\n        else:\n            xb, yb = batch, None\n        self.learner.batch = xb, yb\n\n    def before_fit(self):\n        \"Set model to cuda before training\"\n        self.learner.model.to(self.device)\n        
self.learner.device = self.device\n\n\nclass GetPredictionsCB(Callback):\n    def __init__(self):\n        super().__init__()\n\n    def before_predict(self):\n        self.preds = []\n\n    def after_batch_predict(self):\n        # append the prediction after each forward batch\n        self.preds.append(self.pred)\n\n    def after_predict(self):\n        self.preds = torch.concat(self.preds)  # .detach().cpu().numpy()\n\n\nclass GetTestCB(Callback):\n    def __init__(self):\n        super().__init__()\n\n    def before_test(self):\n        self.preds, self.targets = [], []\n\n    def after_batch_test(self):\n        # append the prediction after each forward batch\n        self.preds.append(self.pred)\n        self.targets.append(self.yb)\n\n    def after_test(self):\n        self.preds = torch.concat(self.preds)  # .detach().cpu().numpy()\n        self.targets = torch.concat(self.targets)  # .detach().cpu().numpy()\n\n\n# Cell\nclass PatchCB(Callback):\n\n    def __init__(self, patch_len, stride):\n        \"\"\"\n        Callback used to perform patching on the batch input data\n        Args:\n            patch_len:        patch length\n            stride:           stride\n        \"\"\"\n        self.patch_len = patch_len\n        self.stride = stride\n\n    def before_forward(self): self.set_patch()\n\n    def set_patch(self):\n        \"\"\"\n        take xb from learner and convert to patch: [bs x seq_len x n_vars] -> [bs x num_patch x n_vars x patch_len]\n        \"\"\"\n        xb_patch, num_patch = create_patch(self.xb, self.patch_len, self.stride)  # xb: [bs x seq_len x n_vars]\n        # learner get the transformed input\n        self.learner.xb = xb_patch  # xb_patch: [bs x num_patch x n_vars x patch_len]\n\n\nclass PatchMaskCB(Callback):\n    def __init__(self, patch_len, stride, mask_ratio,\n                 mask_when_pred: bool = False):\n        \"\"\"\n        Callback used to perform the pretext task of reconstruct the original data after a 
binary mask has been applied.\n        Args:\n            patch_len:        patch length\n            stride:           stride\n            mask_ratio:       mask ratio\n        \"\"\"\n        self.patch_len = patch_len\n        self.stride = stride\n        self.mask_ratio = mask_ratio\n\n    def before_fit(self):\n        # overwrite the predefined loss function\n        self.learner.loss_func = self._loss\n        device = self.learner.device\n\n    def before_forward(self): self.patch_masking()\n\n    def patch_masking(self):\n        \"\"\"\n        xb: [bs x seq_len x n_vars] -> [bs x num_patch x n_vars x patch_len]\n        \"\"\"\n        xb_patch, num_patch = create_patch(self.xb, self.patch_len,\n                                           self.stride)  # xb_patch: [bs x num_patch x n_vars x patch_len]\n        xb_mask, _, self.mask, _ = random_masking(xb_patch,\n                                                  self.mask_ratio)  # xb_mask: [bs x num_patch x n_vars x patch_len]\n        self.mask = self.mask.bool()  # mask: [bs x num_patch x n_vars]\n        self.learner.xb = xb_mask  # learner.xb: masked 4D tensor\n        self.learner.yb = xb_patch  # learner.yb: non-masked 4d tensor\n\n    def _loss(self, preds, target):\n        \"\"\"\n        preds:   [bs x num_patch x n_vars x patch_len]\n        targets: [bs x num_patch x n_vars x patch_len]\n        \"\"\"\n        loss = (preds - target) ** 2\n        loss = loss.mean(dim=-1)\n        loss = (loss * self.mask).sum() / self.mask.sum()\n        return loss\n\n\ndef create_patch(xb, patch_len, stride):\n    \"\"\"\n    xb: [bs x seq_len x n_vars]\n    \"\"\"\n    seq_len = xb.shape[1]\n    num_patch = (max(seq_len, patch_len) - patch_len) // stride + 1\n    tgt_len = patch_len + stride * (num_patch - 1)\n    s_begin = seq_len - tgt_len\n\n    xb = xb[:, s_begin:, :]  # xb: [bs x tgt_len x nvars]\n    xb = xb.unfold(dimension=1, size=patch_len, step=stride)  # xb: [bs x num_patch x n_vars x 
patch_len]\n    return xb, num_patch\n\n\nclass Patch(nn.Module):\n    def __init__(self, seq_len, patch_len, stride):\n        super().__init__()\n        self.seq_len = seq_len\n        self.patch_len = patch_len\n        self.stride = stride\n        self.num_patch = (max(seq_len, patch_len) - patch_len) // stride + 1\n        tgt_len = patch_len + stride * (self.num_patch - 1)\n        self.s_begin = seq_len - tgt_len\n\n    def forward(self, x):\n        \"\"\"\n        x: [bs x seq_len x n_vars]\n        \"\"\"\n        x = x[:, self.s_begin:, :]\n        x = x.unfold(dimension=1, size=self.patch_len, step=self.stride)  # xb: [bs x num_patch x n_vars x patch_len]\n        return x\n\n\ndef random_masking(xb, mask_ratio):\n    # xb: [bs x num_patch x n_vars x patch_len]\n    bs, L, nvars, D = xb.shape\n    x = xb.clone()\n\n    len_keep = int(L * (1 - mask_ratio))\n\n    noise = torch.rand(bs, L, nvars, device=xb.device)  # noise in [0, 1], bs x L x nvars\n\n    # sort noise for each sample\n    ids_shuffle = torch.argsort(noise, dim=1)  # ascend: small is keep, large is remove\n    ids_restore = torch.argsort(ids_shuffle, dim=1)  # ids_restore: [bs x L x nvars]\n\n    # keep the first subset\n    ids_keep = ids_shuffle[:, :len_keep, :]  # ids_keep: [bs x len_keep x nvars]\n    x_kept = torch.gather(x, dim=1, index=ids_keep.unsqueeze(-1).repeat(1, 1, 1,\n                                                                        D))  # x_kept: [bs x len_keep x nvars  x patch_len]\n\n    # removed x\n    x_removed = torch.zeros(bs, L - len_keep, nvars, D,\n                            device=xb.device)  # x_removed: [bs x (L-len_keep) x nvars x patch_len]\n    x_ = torch.cat([x_kept, x_removed], dim=1)  # x_: [bs x L x nvars x patch_len]\n\n    # combine the kept part and the removed one\n    x_masked = torch.gather(x_, dim=1, index=ids_restore.unsqueeze(-1).repeat(1, 1, 1,\n                                                                              D))  # 
x_masked: [bs x num_patch x nvars x patch_len]\n\n    # generate the binary mask: 0 is keep, 1 is remove\n    mask = torch.ones([bs, L, nvars], device=x.device)  # mask: [bs x num_patch x nvars]\n    mask[:, :len_keep, :] = 0\n    # unshuffle to get the binary mask\n    mask = torch.gather(mask, dim=1, index=ids_restore)  # [bs x num_patch x nvars]\n    return x_masked, x_kept, mask, ids_restore\n\n\ndef random_masking_3D(xb, mask_ratio):\n    # xb: [bs x num_patch x dim]\n    bs, L, D = xb.shape\n    x = xb.clone()\n\n    len_keep = int(L * (1 - mask_ratio))\n\n    noise = torch.rand(bs, L, device=xb.device)  # noise in [0, 1], bs x L\n\n    # sort noise for each sample\n    ids_shuffle = torch.argsort(noise, dim=1)  # ascend: small is keep, large is remove\n    ids_restore = torch.argsort(ids_shuffle, dim=1)  # ids_restore: [bs x L]\n\n    # keep the first subset\n    ids_keep = ids_shuffle[:, :len_keep]  # ids_keep: [bs x len_keep]\n    x_kept = torch.gather(x, dim=1, index=ids_keep.unsqueeze(-1).repeat(1, 1, D))  # x_kept: [bs x len_keep x dim]\n\n    # removed x\n    x_removed = torch.zeros(bs, L - len_keep, D, device=xb.device)  # x_removed: [bs x (L-len_keep) x dim]\n    x_ = torch.cat([x_kept, x_removed], dim=1)  # x_: [bs x L x dim]\n\n    # combine the kept part and the removed one\n    x_masked = torch.gather(x_, dim=1,\n                            index=ids_restore.unsqueeze(-1).repeat(1, 1, D))  # x_masked: [bs x num_patch x dim]\n\n    # generate the binary mask: 0 is keep, 1 is remove\n    mask = torch.ones([bs, L], device=x.device)  # mask: [bs x num_patch]\n    mask[:, :len_keep] = 0\n    # unshuffle to get the binary mask\n    mask = torch.gather(mask, dim=1, index=ids_restore)  # [bs x num_patch]\n    return x_masked, x_kept, mask, ids_restore\n\n\nif __name__ == \"__main__\":\n    bs, L, nvars, D = 2, 20, 4, 5\n    xb = torch.randn(bs, L, nvars, D)\n    xb_mask, mask, ids_restore = create_mask(xb, mask_ratio=0.5)\n    breakpoint()\n\n\n"
  },
  {
    "path": "ts_classification_methods/patchtst/scripts/generator_patchtst.py",
    "content": "uea_all = ['ArticularyWordRecognition', 'AtrialFibrillation', 'BasicMotions', 'CharacterTrajectories',\n           'Cricket', 'DuckDuckGeese', 'EigenWorms', 'Epilepsy', 'EthanolConcentration', 'ERing',\n           'FaceDetection', 'FingerMovements', 'HandMovementDirection', 'Handwriting',\n           'Heartbeat', 'InsectWingbeat', 'JapaneseVowels', 'Libras', 'LSST', 'MotorImagery',\n           'NATOPS', 'PenDigits', 'PEMS-SF', 'PhonemeSpectra', 'RacketSports', 'SelfRegulationSCP1',\n           'SelfRegulationSCP2', 'SpokenArabicDigits', 'StandWalkJump', 'UWaveGestureLibrary']\n\nucr_dataset = ['ACSF1', 'Adiac', 'AllGestureWiimoteX', 'AllGestureWiimoteY', 'AllGestureWiimoteZ', 'ArrowHead', 'BME',\n               'Beef',\n               'BeetleFly', 'BirdChicken', 'CBF', 'Car', 'Chinatown', 'ChlorineConcentration', 'CinCECGTorso', 'Coffee',\n               'Computers',\n               'CricketX', 'CricketY', 'CricketZ', 'Crop', 'DiatomSizeReduction', 'DistalPhalanxOutlineAgeGroup',\n               'DistalPhalanxOutlineCorrect', 'DistalPhalanxTW', 'DodgerLoopDay', 'DodgerLoopGame', 'DodgerLoopWeekend',\n               'ECG200', 'ECG5000', 'ECGFiveDays', 'EOGHorizontalSignal', 'EOGVerticalSignal', 'Earthquakes',\n               'ElectricDevices',\n               'EthanolLevel', 'FaceAll', 'FaceFour', 'FacesUCR', 'FiftyWords', 'Fish', 'FordA', 'FordB',\n               'FreezerRegularTrain',\n               'FreezerSmallTrain', 'Fungi', 'GestureMidAirD1', 'GestureMidAirD2', 'GestureMidAirD3', 'GesturePebbleZ1',\n               'GesturePebbleZ2', 'GunPoint', 'GunPointAgeSpan', 'GunPointMaleVersusFemale', 'GunPointOldVersusYoung',\n               'Ham',\n               'HandOutlines', 'Haptics', 'Herring', 'HouseTwenty', 'InlineSkate', 'InsectEPGRegularTrain',\n               'InsectEPGSmallTrain',\n               'InsectWingbeatSound', 'ItalyPowerDemand', 'LargeKitchenAppliances', 'Lightning2', 'Lightning7',\n               'Mallat', 'Meat',\n              
 'MedicalImages', 'MelbournePedestrian', 'MiddlePhalanxOutlineAgeGroup', 'MiddlePhalanxOutlineCorrect',\n               'MiddlePhalanxTW', 'MixedShapesRegularTrain', 'MixedShapesSmallTrain', 'MoteStrain',\n               'NonInvasiveFetalECGThorax1',\n               'NonInvasiveFetalECGThorax2', 'OSULeaf', 'OliveOil', 'PLAID', 'PhalangesOutlinesCorrect', 'Phoneme',\n               'PickupGestureWiimoteZ', 'PigAirwayPressure', 'PigArtPressure', 'PigCVP', 'Plane', 'PowerCons',\n               'ProximalPhalanxOutlineAgeGroup', 'ProximalPhalanxOutlineCorrect', 'ProximalPhalanxTW',\n               'RefrigerationDevices',\n               'Rock', 'ScreenType', 'SemgHandGenderCh2', 'SemgHandMovementCh2', 'SemgHandSubjectCh2',\n               'ShakeGestureWiimoteZ',\n               'ShapeletSim', 'ShapesAll', 'SmallKitchenAppliances', 'SmoothSubspace', 'SonyAIBORobotSurface1',\n               'SonyAIBORobotSurface2', 'StarLightCurves', 'Strawberry', 'SwedishLeaf', 'Symbols', 'SyntheticControl',\n               'ToeSegmentation1', 'ToeSegmentation2', 'Trace', 'TwoLeadECG', 'TwoPatterns', 'UMD',\n               'UWaveGestureLibraryAll',\n               'UWaveGestureLibraryX', 'UWaveGestureLibraryY', 'UWaveGestureLibraryZ', 'Wafer', 'Wine', 'WordSynonyms',\n               'Worms',\n               'WormsTwoClass', 'Yoga']\n\ncode_main = 'main_gpt4ts_uea'   ## main_patchtst_ucr  main_gpt4ts_ucr  mian_patchtst\n\n_mu_all = [8]  ## , 16, 24, 32, 48, 64\n\ni = 1\nfor dataset in uea_all:\n    print(\"i = \", i, \"dataset_name = \", dataset)\n    i = i + 1\n\n    save_csv_name = code_main + '_0702_'  ##  --len_k\n\n    with open('/dev_data/lz/time_series_label_noise/patchtst/scripts/patchtst_ucr.sh', 'a') as f:\n        f.write('python ' + code_main + '.py '\n                '--dataset ' + dataset\n                + ' --epoch 1000 '+\n                '--save_csv_name ' + save_csv_name + ' --cuda cuda:1' + ';\\n')\n\n## nohup ./scripts/patchtst_uea.sh &\n\n## nohup 
./scripts/patchtst_ucr.sh &"
  },
  {
    "path": "ts_classification_methods/scripts/dilated_single_norm.sh",
    "content": "python train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ACSF1 --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Adiac --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset AllGestureWiimoteX --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset AllGestureWiimoteY --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset AllGestureWiimoteZ --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ArrowHead --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset BME --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name 
dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Beef --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset BeetleFly --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset BirdChicken --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset CBF --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Car --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Chinatown --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ChlorineConcentration --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name 
dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset CinCECGTorso --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Coffee --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Computers --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset CricketX --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset CricketY --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset CricketZ --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Crop --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name 
dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset DiatomSizeReduction --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset DistalPhalanxOutlineAgeGroup --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset DistalPhalanxOutlineCorrect --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset DistalPhalanxTW --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset DodgerLoopDay --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset DodgerLoopGame --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset DodgerLoopWeekend --mode directly_cls 
--epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ECG200 --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ECG5000 --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ECGFiveDays --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset EOGHorizontalSignal --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset EOGVerticalSignal --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Earthquakes --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ElectricDevices --mode 
directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset EthanolLevel --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FaceAll --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FaceFour --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FacesUCR --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FiftyWords --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Fish --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FordA --mode directly_cls --epoch 
1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FordB --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FreezerRegularTrain --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FreezerSmallTrain --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Fungi --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GestureMidAirD1 --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GestureMidAirD2 --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GestureMidAirD3 --mode 
directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GesturePebbleZ1 --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GesturePebbleZ2 --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GunPoint --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GunPointAgeSpan --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GunPointMaleVersusFemale --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GunPointOldVersusYoung --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way 
single --dataset Ham --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset HandOutlines --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Haptics --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Herring --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset HouseTwenty --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset InlineSkate --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset InsectEPGRegularTrain --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way 
single --dataset InsectEPGSmallTrain --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset InsectWingbeatSound --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ItalyPowerDemand --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset LargeKitchenAppliances --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Lightning2 --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Lightning7 --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Mallat --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot 
/SSD/lz/UCRArchive_2018 --normalize_way single --dataset Meat --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MedicalImages --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MelbournePedestrian --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MiddlePhalanxOutlineAgeGroup --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MiddlePhalanxOutlineCorrect --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MiddlePhalanxTW --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MixedShapesRegularTrain --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated 
--classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MixedShapesSmallTrain --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MoteStrain --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset NonInvasiveFetalECGThorax1 --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset NonInvasiveFetalECGThorax2 --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset OSULeaf --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset OliveOil --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset PLAID --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython 
train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset PhalangesOutlinesCorrect --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Phoneme --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset PickupGestureWiimoteZ --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset PigAirwayPressure --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset PigArtPressure --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset PigCVP --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Plane --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ 
--cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset PowerCons --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ProximalPhalanxOutlineAgeGroup --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ProximalPhalanxOutlineCorrect --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ProximalPhalanxTW --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset RefrigerationDevices --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Rock --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ScreenType --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy 
--save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SemgHandGenderCh2 --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SemgHandMovementCh2 --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SemgHandSubjectCh2 --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ShakeGestureWiimoteZ --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ShapeletSim --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ShapesAll --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SmallKitchenAppliances --mode directly_cls 
--epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SmoothSubspace --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SonyAIBORobotSurface1 --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SonyAIBORobotSurface2 --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset StarLightCurves --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Strawberry --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SwedishLeaf --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset 
Symbols --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SyntheticControl --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ToeSegmentation1 --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ToeSegmentation2 --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Trace --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset TwoLeadECG --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset TwoPatterns --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single 
--dataset UMD --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset UWaveGestureLibraryAll --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset UWaveGestureLibraryX --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset UWaveGestureLibraryY --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset UWaveGestureLibraryZ --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Wafer --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Wine --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 
--normalize_way single --dataset WordSynonyms --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Worms --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset WormsTwoClass --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Yoga --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ACSF1 --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Adiac --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset AllGestureWiimoteX --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot 
/SSD/lz/UCRArchive_2018 --normalize_way single --dataset AllGestureWiimoteY --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset AllGestureWiimoteZ --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ArrowHead --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset BME --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Beef --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset BeetleFly --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset BirdChicken --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier 
nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset CBF --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Car --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Chinatown --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ChlorineConcentration --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset CinCECGTorso --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Coffee --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Computers --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py 
--backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset CricketX --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset CricketY --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset CricketZ --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Crop --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset DiatomSizeReduction --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset DistalPhalanxOutlineAgeGroup --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset DistalPhalanxOutlineCorrect --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy 
--save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset DistalPhalanxTW --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset DodgerLoopDay --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset DodgerLoopGame --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset DodgerLoopWeekend --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ECG200 --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ECG5000 --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ECGFiveDays --mode 
directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset EOGHorizontalSignal --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset EOGVerticalSignal --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Earthquakes --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ElectricDevices --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset EthanolLevel --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FaceAll --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot 
/SSD/lz/UCRArchive_2018 --normalize_way single --dataset FaceFour --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FacesUCR --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FiftyWords --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Fish --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FordA --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FordB --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FreezerRegularTrain --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear 
--classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FreezerSmallTrain --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Fungi --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GestureMidAirD1 --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GestureMidAirD2 --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GestureMidAirD3 --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GesturePebbleZ1 --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GesturePebbleZ2 --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda 
cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GunPoint --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GunPointAgeSpan --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GunPointMaleVersusFemale --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GunPointOldVersusYoung --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Ham --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset HandOutlines --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Haptics --mode directly_cls --epoch 1000 --depth 3  --loss 
cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Herring --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset HouseTwenty --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset InlineSkate --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset InsectEPGRegularTrain --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset InsectEPGSmallTrain --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset InsectWingbeatSound --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single 
--dataset ItalyPowerDemand --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset LargeKitchenAppliances --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Lightning2 --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Lightning7 --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Mallat --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Meat --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MedicalImages --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 
--dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MelbournePedestrian --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MiddlePhalanxOutlineAgeGroup --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MiddlePhalanxOutlineCorrect --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MiddlePhalanxTW --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MixedShapesRegularTrain --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MixedShapesSmallTrain --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MoteStrain --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name 
dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset NonInvasiveFetalECGThorax1 --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset NonInvasiveFetalECGThorax2 --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset OSULeaf --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset OliveOil --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset PLAID --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset PhalangesOutlinesCorrect --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Phoneme --mode 
directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset PickupGestureWiimoteZ --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset PigAirwayPressure --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset PigArtPressure --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset PigCVP --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Plane --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset PowerCons --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 
--normalize_way single --dataset ProximalPhalanxOutlineAgeGroup --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ProximalPhalanxOutlineCorrect --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ProximalPhalanxTW --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset RefrigerationDevices --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Rock --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ScreenType --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SemgHandGenderCh2 --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py 
--backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SemgHandMovementCh2 --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SemgHandSubjectCh2 --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ShakeGestureWiimoteZ --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ShapeletSim --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ShapesAll --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SmallKitchenAppliances --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SmoothSubspace --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy 
--save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SonyAIBORobotSurface1 --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SonyAIBORobotSurface2 --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset StarLightCurves --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Strawberry --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SwedishLeaf --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Symbols --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset 
SyntheticControl --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ToeSegmentation1 --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ToeSegmentation2 --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Trace --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset TwoLeadECG --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset TwoPatterns --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset UMD --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot 
/SSD/lz/UCRArchive_2018 --normalize_way single --dataset UWaveGestureLibraryAll --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset UWaveGestureLibraryX --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset UWaveGestureLibraryY --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset UWaveGestureLibraryZ --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Wafer --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Wine --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset WordSynonyms --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py 
--backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Worms --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset WormsTwoClass --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Yoga --mode directly_cls --epoch 1000 --depth 3  --loss cross_entropy --save_csv_name dilated3_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ACSF1 --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Adiac --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset AllGestureWiimoteX --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset AllGestureWiimoteY --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda 
cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset AllGestureWiimoteZ --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ArrowHead --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset BME --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Beef --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset BeetleFly --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset BirdChicken --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset CBF --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda 
cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Car --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Chinatown --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ChlorineConcentration --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset CinCECGTorso --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Coffee --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Computers --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset CricketX --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ 
--cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset CricketY --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset CricketZ --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Crop --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset DiatomSizeReduction --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset DistalPhalanxOutlineAgeGroup --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset DistalPhalanxOutlineCorrect --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset DistalPhalanxTW --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy 
--save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset DodgerLoopDay --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset DodgerLoopGame --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset DodgerLoopWeekend --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ECG200 --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ECG5000 --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ECGFiveDays --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset EOGHorizontalSignal --mode directly_cls --epoch 1000 
--depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset EOGVerticalSignal --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Earthquakes --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ElectricDevices --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset EthanolLevel --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FaceAll --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FaceFour --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FacesUCR --mode 
directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FiftyWords --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Fish --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FordA --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FordB --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FreezerRegularTrain --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FreezerSmallTrain --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Fungi 
--mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GestureMidAirD1 --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GestureMidAirD2 --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GestureMidAirD3 --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GesturePebbleZ1 --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GesturePebbleZ2 --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GunPoint --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 
--normalize_way single --dataset GunPointAgeSpan --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GunPointMaleVersusFemale --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GunPointOldVersusYoung --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Ham --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset HandOutlines --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Haptics --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Herring --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 
--dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset HouseTwenty --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset InlineSkate --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset InsectEPGRegularTrain --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset InsectEPGSmallTrain --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset InsectWingbeatSound --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ItalyPowerDemand --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset LargeKitchenAppliances --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py 
--backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Lightning2 --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Lightning7 --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Mallat --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Meat --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MedicalImages --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MelbournePedestrian --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MiddlePhalanxOutlineAgeGroup --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ 
--cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MiddlePhalanxOutlineCorrect --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MiddlePhalanxTW --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MixedShapesRegularTrain --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MixedShapesSmallTrain --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MoteStrain --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset NonInvasiveFetalECGThorax1 --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset NonInvasiveFetalECGThorax2 --mode directly_cls 
--epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset OSULeaf --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset OliveOil --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset PLAID --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset PhalangesOutlinesCorrect --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Phoneme --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset PickupGestureWiimoteZ --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset 
PigAirwayPressure --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset PigArtPressure --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset PigCVP --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Plane --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset PowerCons --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ProximalPhalanxOutlineAgeGroup --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ProximalPhalanxOutlineCorrect --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot 
/SSD/lz/UCRArchive_2018 --normalize_way single --dataset ProximalPhalanxTW --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset RefrigerationDevices --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Rock --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ScreenType --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SemgHandGenderCh2 --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SemgHandMovementCh2 --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SemgHandSubjectCh2 --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated 
--classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ShakeGestureWiimoteZ --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ShapeletSim --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ShapesAll --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SmallKitchenAppliances --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SmoothSubspace --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SonyAIBORobotSurface1 --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SonyAIBORobotSurface2 --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name 
dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset StarLightCurves --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Strawberry --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SwedishLeaf --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Symbols --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SyntheticControl --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ToeSegmentation1 --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ToeSegmentation2 --mode directly_cls --epoch 1000 --depth 10  --loss 
cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Trace --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset TwoLeadECG --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset TwoPatterns --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset UMD --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset UWaveGestureLibraryAll --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset UWaveGestureLibraryX --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset UWaveGestureLibraryY --mode directly_cls 
--epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset UWaveGestureLibraryZ --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Wafer --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Wine --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset WordSynonyms --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Worms --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset WormsTwoClass --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Yoga --mode 
directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ACSF1 --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Adiac --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset AllGestureWiimoteX --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset AllGestureWiimoteY --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset AllGestureWiimoteZ --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ArrowHead --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot 
/SSD/lz/UCRArchive_2018 --normalize_way single --dataset BME --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Beef --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset BeetleFly --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset BirdChicken --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset CBF --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Car --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Chinatown --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear 
--classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ChlorineConcentration --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset CinCECGTorso --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Coffee --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Computers --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset CricketX --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset CricketY --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset CricketZ --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda 
cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Crop --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset DiatomSizeReduction --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset DistalPhalanxOutlineAgeGroup --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset DistalPhalanxOutlineCorrect --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset DistalPhalanxTW --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset DodgerLoopDay --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset DodgerLoopGame --mode directly_cls 
--epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset DodgerLoopWeekend --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ECG200 --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ECG5000 --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ECGFiveDays --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset EOGHorizontalSignal --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset EOGVerticalSignal --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot 
/SSD/lz/UCRArchive_2018 --normalize_way single --dataset Earthquakes --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ElectricDevices --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset EthanolLevel --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FaceAll --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FaceFour --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FacesUCR --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FiftyWords --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated 
--classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Fish --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FordA --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FordB --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FreezerRegularTrain --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FreezerSmallTrain --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Fungi --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GestureMidAirD1 --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ 
--cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GestureMidAirD2 --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GestureMidAirD3 --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GesturePebbleZ1 --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GesturePebbleZ2 --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GunPoint --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GunPointAgeSpan --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GunPointMaleVersusFemale --mode directly_cls --epoch 
1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GunPointOldVersusYoung --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Ham --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset HandOutlines --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Haptics --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Herring --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset HouseTwenty --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way 
single --dataset InlineSkate --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset InsectEPGRegularTrain --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset InsectEPGSmallTrain --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset InsectWingbeatSound --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ItalyPowerDemand --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset LargeKitchenAppliances --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Lightning2 --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone 
dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Lightning7 --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Mallat --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Meat --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MedicalImages --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MelbournePedestrian --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MiddlePhalanxOutlineAgeGroup --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MiddlePhalanxOutlineCorrect --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy 
--save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MiddlePhalanxTW --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MixedShapesRegularTrain --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MixedShapesSmallTrain --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MoteStrain --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset NonInvasiveFetalECGThorax1 --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset NonInvasiveFetalECGThorax2 --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot 
/SSD/lz/UCRArchive_2018 --normalize_way single --dataset OSULeaf --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset OliveOil --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset PLAID --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset PhalangesOutlinesCorrect --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Phoneme --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset PickupGestureWiimoteZ --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset PigAirwayPressure --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py 
--backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset PigArtPressure --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset PigCVP --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Plane --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset PowerCons --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ProximalPhalanxOutlineAgeGroup --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ProximalPhalanxOutlineCorrect --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ProximalPhalanxTW --mode directly_cls --epoch 1000 --depth 10  --loss 
cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset RefrigerationDevices --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Rock --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ScreenType --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SemgHandGenderCh2 --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SemgHandMovementCh2 --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SemgHandSubjectCh2 --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way 
single --dataset ShakeGestureWiimoteZ --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ShapeletSim --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ShapesAll --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SmallKitchenAppliances --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SmoothSubspace --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SonyAIBORobotSurface1 --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SonyAIBORobotSurface2 --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone 
dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset StarLightCurves --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Strawberry --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SwedishLeaf --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Symbols --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SyntheticControl --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ToeSegmentation1 --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ToeSegmentation2 --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name 
dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Trace --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset TwoLeadECG --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset TwoPatterns --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset UMD --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset UWaveGestureLibraryAll --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset UWaveGestureLibraryX --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset UWaveGestureLibraryY --mode 
directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset UWaveGestureLibraryZ --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Wafer --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Wine --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset WordSynonyms --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Worms --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset WormsTwoClass --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\npython train.py --backbone dilated --classifier nonlinear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 
--normalize_way single --dataset Yoga --mode directly_cls --epoch 1000 --depth 10  --loss cross_entropy --save_csv_name dilated10_nonlin_single_norm_0409_ --cuda cuda:0;\n"
  },
  {
    "path": "ts_classification_methods/scripts/fcn_lin_set_norm.sh",
    "content": "python train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ACSF1 --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Adiac --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset AllGestureWiimoteX --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset AllGestureWiimoteY --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset AllGestureWiimoteZ --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ArrowHead --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset BME --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 
--normalize_way train_set --dataset Beef --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset BeetleFly --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset BirdChicken --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset CBF --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Car --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Chinatown --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ChlorineConcentration --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset CinCECGTorso --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ 
--cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Coffee --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Computers --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset CricketX --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset CricketY --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset CricketZ --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Crop --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset DiatomSizeReduction --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way 
train_set --dataset DistalPhalanxOutlineAgeGroup --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset DistalPhalanxOutlineCorrect --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset DistalPhalanxTW --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset DodgerLoopDay --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset DodgerLoopGame --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset DodgerLoopWeekend --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ECG200 --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ECG5000 --mode directly_cls --epoch 1000  --loss cross_entropy 
--save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ECGFiveDays --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset EOGHorizontalSignal --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset EOGVerticalSignal --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Earthquakes --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ElectricDevices --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset EthanolLevel --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset FaceAll --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear 
--classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset FaceFour --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset FacesUCR --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset FiftyWords --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Fish --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset FordA --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset FordB --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset FreezerRegularTrain --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset FreezerSmallTrain --mode directly_cls --epoch 1000  
--loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Fungi --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset GestureMidAirD1 --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset GestureMidAirD2 --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset GestureMidAirD3 --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset GesturePebbleZ1 --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset GesturePebbleZ2 --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset GunPoint --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier 
linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset GunPointAgeSpan --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset GunPointMaleVersusFemale --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset GunPointOldVersusYoung --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Ham --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset HandOutlines --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Haptics --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Herring --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset HouseTwenty 
--mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset InlineSkate --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset InsectEPGRegularTrain --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset InsectEPGSmallTrain --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset InsectWingbeatSound --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ItalyPowerDemand --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset LargeKitchenAppliances --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Lightning2 --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name 
fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Lightning7 --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Mallat --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Meat --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset MedicalImages --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset MelbournePedestrian --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset MiddlePhalanxOutlineAgeGroup --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset MiddlePhalanxOutlineCorrect --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear 
--classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset MiddlePhalanxTW --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset MixedShapesRegularTrain --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset MixedShapesSmallTrain --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset MoteStrain --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset NonInvasiveFetalECGThorax1 --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset NonInvasiveFetalECGThorax2 --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset OSULeaf --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way 
train_set --dataset OliveOil --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset PLAID --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset PhalangesOutlinesCorrect --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Phoneme --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset PickupGestureWiimoteZ --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset PigAirwayPressure --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset PigArtPressure --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset PigCVP --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name 
fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Plane --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset PowerCons --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ProximalPhalanxOutlineAgeGroup --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ProximalPhalanxOutlineCorrect --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ProximalPhalanxTW --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset RefrigerationDevices --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Rock --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear 
--classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ScreenType --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset SemgHandGenderCh2 --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset SemgHandMovementCh2 --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset SemgHandSubjectCh2 --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ShakeGestureWiimoteZ --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ShapeletSim --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ShapesAll --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset 
SmallKitchenAppliances --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset SmoothSubspace --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset SonyAIBORobotSurface1 --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset SonyAIBORobotSurface2 --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset StarLightCurves --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Strawberry --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset SwedishLeaf --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Symbols --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name 
fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset SyntheticControl --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ToeSegmentation1 --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ToeSegmentation2 --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Trace --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset TwoLeadECG --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset TwoPatterns --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset UMD --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot 
/SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset UWaveGestureLibraryAll --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset UWaveGestureLibraryX --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset UWaveGestureLibraryY --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset UWaveGestureLibraryZ --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Wafer --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Wine --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset WordSynonyms --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Worms --mode directly_cls --epoch 1000  
--loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset WormsTwoClass --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Yoga --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1;\n"
  },
  {
    "path": "ts_classification_methods/scripts/fcn_lin_single_norm.sh",
    "content": "python train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ACSF1 --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Adiac --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset AllGestureWiimoteX --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset AllGestureWiimoteY --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset AllGestureWiimoteZ --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ArrowHead --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset BME --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 
--normalize_way single --dataset Beef --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset BeetleFly --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset BirdChicken --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset CBF --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Car --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Chinatown --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ChlorineConcentration --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset CinCECGTorso --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ 
--cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Coffee --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Computers --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset CricketX --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset CricketY --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset CricketZ --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Crop --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset DiatomSizeReduction --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way 
single --dataset DistalPhalanxOutlineAgeGroup --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset DistalPhalanxOutlineCorrect --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset DistalPhalanxTW --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset DodgerLoopDay --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset DodgerLoopGame --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset DodgerLoopWeekend --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ECG200 --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ECG5000 --mode directly_cls --epoch 1000  --loss cross_entropy 
--save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ECGFiveDays --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset EOGHorizontalSignal --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset EOGVerticalSignal --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Earthquakes --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ElectricDevices --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset EthanolLevel --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FaceAll --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear 
--classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FaceFour --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FacesUCR --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FiftyWords --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Fish --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FordA --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FordB --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FreezerRegularTrain --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FreezerSmallTrain --mode directly_cls --epoch 1000  
--loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Fungi --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GestureMidAirD1 --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GestureMidAirD2 --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GestureMidAirD3 --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GesturePebbleZ1 --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GesturePebbleZ2 --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GunPoint --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn 
--classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GunPointAgeSpan --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GunPointMaleVersusFemale --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GunPointOldVersusYoung --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Ham --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset HandOutlines --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Haptics --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Herring --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset 
HouseTwenty --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset InlineSkate --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset InsectEPGRegularTrain --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset InsectEPGSmallTrain --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset InsectWingbeatSound --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ItalyPowerDemand --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset LargeKitchenAppliances --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Lightning2 --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name 
fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Lightning7 --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Mallat --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Meat --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MedicalImages --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MelbournePedestrian --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MiddlePhalanxOutlineAgeGroup --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MiddlePhalanxOutlineCorrect --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear 
--classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MiddlePhalanxTW --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MixedShapesRegularTrain --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MixedShapesSmallTrain --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MoteStrain --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset NonInvasiveFetalECGThorax1 --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset NonInvasiveFetalECGThorax2 --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset OSULeaf --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way 
single --dataset OliveOil --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset PLAID --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset PhalangesOutlinesCorrect --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Phoneme --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset PickupGestureWiimoteZ --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset PigAirwayPressure --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset PigArtPressure --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset PigCVP --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name 
fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Plane --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset PowerCons --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ProximalPhalanxOutlineAgeGroup --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ProximalPhalanxOutlineCorrect --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ProximalPhalanxTW --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset RefrigerationDevices --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Rock --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear 
--classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ScreenType --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SemgHandGenderCh2 --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SemgHandMovementCh2 --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SemgHandSubjectCh2 --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ShakeGestureWiimoteZ --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ShapeletSim --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ShapesAll --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset 
SmallKitchenAppliances --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SmoothSubspace --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SonyAIBORobotSurface1 --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SonyAIBORobotSurface2 --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset StarLightCurves --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Strawberry --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SwedishLeaf --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Symbols --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name 
fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SyntheticControl --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ToeSegmentation1 --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ToeSegmentation2 --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Trace --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset TwoLeadECG --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset TwoPatterns --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset UMD --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot 
/SSD/lz/UCRArchive_2018 --normalize_way single --dataset UWaveGestureLibraryAll --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset UWaveGestureLibraryX --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset UWaveGestureLibraryY --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset UWaveGestureLibraryZ --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Wafer --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Wine --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset WordSynonyms --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Worms --mode directly_cls --epoch 1000  --loss 
cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset WormsTwoClass --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\npython train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Yoga --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1;\n"
  },
  {
    "path": "ts_classification_methods/scripts/generator_dilated.py",
    "content": "ucr_dataset = ['ACSF1', 'Adiac', 'AllGestureWiimoteX', 'AllGestureWiimoteY', 'AllGestureWiimoteZ', 'ArrowHead', 'BME',\n               'Beef',\n               'BeetleFly', 'BirdChicken', 'CBF', 'Car', 'Chinatown', 'ChlorineConcentration', 'CinCECGTorso', 'Coffee',\n               'Computers',\n               'CricketX', 'CricketY', 'CricketZ', 'Crop', 'DiatomSizeReduction', 'DistalPhalanxOutlineAgeGroup',\n               'DistalPhalanxOutlineCorrect', 'DistalPhalanxTW', 'DodgerLoopDay', 'DodgerLoopGame', 'DodgerLoopWeekend',\n               'ECG200', 'ECG5000', 'ECGFiveDays', 'EOGHorizontalSignal', 'EOGVerticalSignal', 'Earthquakes',\n               'ElectricDevices',\n               'EthanolLevel', 'FaceAll', 'FaceFour', 'FacesUCR', 'FiftyWords', 'Fish', 'FordA', 'FordB',\n               'FreezerRegularTrain',\n               'FreezerSmallTrain', 'Fungi', 'GestureMidAirD1', 'GestureMidAirD2', 'GestureMidAirD3', 'GesturePebbleZ1',\n               'GesturePebbleZ2', 'GunPoint', 'GunPointAgeSpan', 'GunPointMaleVersusFemale', 'GunPointOldVersusYoung',\n               'Ham',\n               'HandOutlines', 'Haptics', 'Herring', 'HouseTwenty', 'InlineSkate', 'InsectEPGRegularTrain',\n               'InsectEPGSmallTrain',\n               'InsectWingbeatSound', 'ItalyPowerDemand', 'LargeKitchenAppliances', 'Lightning2', 'Lightning7',\n               'Mallat', 'Meat',\n               'MedicalImages', 'MelbournePedestrian', 'MiddlePhalanxOutlineAgeGroup', 'MiddlePhalanxOutlineCorrect',\n               'MiddlePhalanxTW', 'MixedShapesRegularTrain', 'MixedShapesSmallTrain', 'MoteStrain',\n               'NonInvasiveFetalECGThorax1',\n               'NonInvasiveFetalECGThorax2', 'OSULeaf', 'OliveOil', 'PLAID', 'PhalangesOutlinesCorrect', 'Phoneme',\n               'PickupGestureWiimoteZ', 'PigAirwayPressure', 'PigArtPressure', 'PigCVP', 'Plane', 'PowerCons',\n               'ProximalPhalanxOutlineAgeGroup', 'ProximalPhalanxOutlineCorrect', 'ProximalPhalanxTW',\n 
              'RefrigerationDevices',\n               'Rock', 'ScreenType', 'SemgHandGenderCh2', 'SemgHandMovementCh2', 'SemgHandSubjectCh2',\n               'ShakeGestureWiimoteZ',\n               'ShapeletSim', 'ShapesAll', 'SmallKitchenAppliances', 'SmoothSubspace', 'SonyAIBORobotSurface1',\n               'SonyAIBORobotSurface2', 'StarLightCurves', 'Strawberry', 'SwedishLeaf', 'Symbols', 'SyntheticControl',\n               'ToeSegmentation1', 'ToeSegmentation2', 'Trace', 'TwoLeadECG', 'TwoPatterns', 'UMD',\n               'UWaveGestureLibraryAll',\n               'UWaveGestureLibraryX', 'UWaveGestureLibraryY', 'UWaveGestureLibraryZ', 'Wafer', 'Wine', 'WordSynonyms',\n               'Worms',\n               'WormsTwoClass', 'Yoga']\n\ni = 0\nfor dataset in ucr_dataset:\n    print(\"i = \", i, \"dataset_name = \", dataset)\n    i = i + 1\n    '''\n        python train.py --backbone dilated --classifier linear --classifier_input 320 --depth 3 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ACSF1 --mode directly_cls --epoch 10  --loss cross_entropy --save_csv_name test_nonlin_set_norm_0409_ --cuda cuda:1\n        '''\n    with open('/SSD/lz/time_tsm/scripts/dilated_single_norm.sh', 'a') as f:\n        f.write('python train.py --backbone dilated --classifier linear --classifier_input 320 '\n                '--dataroot /SSD/lz/UCRArchive_2018 --normalize_way single '\n                '--dataset ' + dataset\n                + ' --mode directly_cls --epoch 1000 --depth 3 ' +\n                ' --loss cross_entropy --save_csv_name dilated3_lin_single_norm_0409_ --cuda cuda:0' + ';\\n')\n\n\ni = 0\nfor dataset in ucr_dataset:\n    print(\"i = \", i, \"dataset_name = \", dataset)\n    i = i + 1\n    '''\n        python train.py --backbone dilated --classifier linear --classifier_input 320 --depth 10 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ACSF1 --mode directly_cls --epoch 10  --loss cross_entropy --save_csv_name 
test_nonlin_set_norm_0409_ --cuda cuda:1\n        '''\n    with open('/SSD/lz/time_tsm/scripts/dilated_single_norm.sh', 'a') as f:\n        f.write('python train.py --backbone dilated --classifier linear --classifier_input 320 '\n                '--dataroot /SSD/lz/UCRArchive_2018 --normalize_way single '\n                '--dataset ' + dataset\n                + ' --mode directly_cls --epoch 1000 --depth 10 ' +\n                ' --loss cross_entropy --save_csv_name dilated10_lin_single_norm_0409_ --cuda cuda:0' + ';\\n')\n\n\n\n## nohup ./scripts/dilated_single_norm.sh &"
  },
  {
    "path": "ts_classification_methods/scripts/generator_fcn.py",
    "content": "ucr_dataset = ['ACSF1', 'Adiac', 'AllGestureWiimoteX', 'AllGestureWiimoteY', 'AllGestureWiimoteZ', 'ArrowHead', 'BME',\n               'Beef',\n               'BeetleFly', 'BirdChicken', 'CBF', 'Car', 'Chinatown', 'ChlorineConcentration', 'CinCECGTorso', 'Coffee',\n               'Computers',\n               'CricketX', 'CricketY', 'CricketZ', 'Crop', 'DiatomSizeReduction', 'DistalPhalanxOutlineAgeGroup',\n               'DistalPhalanxOutlineCorrect', 'DistalPhalanxTW', 'DodgerLoopDay', 'DodgerLoopGame', 'DodgerLoopWeekend',\n               'ECG200', 'ECG5000', 'ECGFiveDays', 'EOGHorizontalSignal', 'EOGVerticalSignal', 'Earthquakes',\n               'ElectricDevices',\n               'EthanolLevel', 'FaceAll', 'FaceFour', 'FacesUCR', 'FiftyWords', 'Fish', 'FordA', 'FordB',\n               'FreezerRegularTrain',\n               'FreezerSmallTrain', 'Fungi', 'GestureMidAirD1', 'GestureMidAirD2', 'GestureMidAirD3', 'GesturePebbleZ1',\n               'GesturePebbleZ2', 'GunPoint', 'GunPointAgeSpan', 'GunPointMaleVersusFemale', 'GunPointOldVersusYoung',\n               'Ham',\n               'HandOutlines', 'Haptics', 'Herring', 'HouseTwenty', 'InlineSkate', 'InsectEPGRegularTrain',\n               'InsectEPGSmallTrain',\n               'InsectWingbeatSound', 'ItalyPowerDemand', 'LargeKitchenAppliances', 'Lightning2', 'Lightning7',\n               'Mallat', 'Meat',\n               'MedicalImages', 'MelbournePedestrian', 'MiddlePhalanxOutlineAgeGroup', 'MiddlePhalanxOutlineCorrect',\n               'MiddlePhalanxTW', 'MixedShapesRegularTrain', 'MixedShapesSmallTrain', 'MoteStrain',\n               'NonInvasiveFetalECGThorax1',\n               'NonInvasiveFetalECGThorax2', 'OSULeaf', 'OliveOil', 'PLAID', 'PhalangesOutlinesCorrect', 'Phoneme',\n               'PickupGestureWiimoteZ', 'PigAirwayPressure', 'PigArtPressure', 'PigCVP', 'Plane', 'PowerCons',\n               'ProximalPhalanxOutlineAgeGroup', 'ProximalPhalanxOutlineCorrect', 'ProximalPhalanxTW',\n 
              'RefrigerationDevices',\n               'Rock', 'ScreenType', 'SemgHandGenderCh2', 'SemgHandMovementCh2', 'SemgHandSubjectCh2',\n               'ShakeGestureWiimoteZ',\n               'ShapeletSim', 'ShapesAll', 'SmallKitchenAppliances', 'SmoothSubspace', 'SonyAIBORobotSurface1',\n               'SonyAIBORobotSurface2', 'StarLightCurves', 'Strawberry', 'SwedishLeaf', 'Symbols', 'SyntheticControl',\n               'ToeSegmentation1', 'ToeSegmentation2', 'Trace', 'TwoLeadECG', 'TwoPatterns', 'UMD',\n               'UWaveGestureLibraryAll',\n               'UWaveGestureLibraryX', 'UWaveGestureLibraryY', 'UWaveGestureLibraryZ', 'Wafer', 'Wine', 'WordSynonyms',\n               'Worms',\n               'WormsTwoClass', 'Yoga']\n\ni = 0\nfor dataset in ucr_dataset:\n    print(\"i = \", i, \"dataset_name = \", dataset)\n    i = i + 1\n    # '''\n    # python train.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ACSF1 --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_nonlin_set_norm_0404_ --cuda cuda:1\n    # '''\n    # with open('/SSD/lz/time_tsm/scripts/fcn_nonlin_set_norm.sh', 'a') as f:\n    #     f.write('python train.py --backbone fcn --classifier nonlinear --classifier_input 128 '\n    #             '--dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set '\n    #             '--dataset ' + dataset\n    #             + ' --mode directly_cls --epoch 1000 ' +\n    #             ' --loss cross_entropy --save_csv_name fcn_nonlin_set_norm_0404_ --cuda cuda:1' + ';\\n')\n    #\n    # '''\n    # python train.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Coffee --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_nonlin_single_norm_0404_ --cuda cuda:1;\n    # '''\n    # with open('/SSD/lz/time_tsm/scripts/fcn_nonlin_single_norm.sh', 'a') as 
f:\n    #     f.write('python train.py --backbone fcn --classifier nonlinear --classifier_input 128 '\n    #             '--dataroot /SSD/lz/UCRArchive_2018 --normalize_way single '\n    #             '--dataset ' + dataset\n    #             + ' --mode directly_cls --epoch 1000 ' +\n    #             ' --loss cross_entropy --save_csv_name fcn_nonlin_single_norm_0404_ --cuda cuda:1' + ';\\n')\n\n    '''\n        python train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ACSF1 --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_nonlin_set_norm_0404_ --cuda cuda:1\n        '''\n    # with open('/SSD/lz/time_tsm/scripts/fcn_lin_set_norm.sh', 'a') as f:\n    #     f.write('python train.py --backbone fcn --classifier linear --classifier_input 128 '\n    #             '--dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set '\n    #             '--dataset ' + dataset\n    #             + ' --mode directly_cls --epoch 1000 ' +\n    #             ' --loss cross_entropy --save_csv_name fcn_lin_set_norm_0407_ --cuda cuda:1' + ';\\n')\n\n    '''\n    python train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Coffee --mode directly_cls --epoch 1000  --loss cross_entropy --save_csv_name fcn_nonlin_single_norm_0404_ --cuda cuda:1;\n    '''\n    with open('/SSD/lz/time_tsm/scripts/fcn_lin_single_norm.sh', 'a') as f:\n        f.write('python train.py --backbone fcn --classifier linear --classifier_input 128 '\n                '--dataroot /SSD/lz/UCRArchive_2018 --normalize_way single '\n                '--dataset ' + dataset\n                + ' --mode directly_cls --epoch 1000 ' +\n                ' --loss cross_entropy --save_csv_name fcn_lin_single_norm_0311_ --cuda cuda:1' + ';\\n')\n\n\n\n## nohup ./scripts/fcn_lin_set_norm.sh &\n## nohup ./scripts/fcn_lin_single_norm.sh &"
  },
  {
    "path": "ts_classification_methods/scripts/generator_pretrain_cls.py",
    "content": "source_datasets = ['Crop', 'ElectricDevices', 'StarLightCurves', 'Wafer', 'ECG5000', 'TwoPatterns', 'FordA',\n                   'UWaveGestureLibraryAll', 'UWaveGestureLibraryX', 'UWaveGestureLibraryY', 'UWaveGestureLibraryZ',\n                   'FordB', 'ChlorineConcentration', 'NonInvasiveFetalECGThorax1', 'NonInvasiveFetalECGThorax2']\n\ntarget_min_datasets = ['BirdChicken', 'BeetleFly', 'Coffee', 'OliveOil', 'Beef', 'Rock', 'ShakeGestureWiimoteZ',\n                       'PickupGestureWiimoteZ', 'Wine', 'FaceFour', 'Meat', 'Car', 'Lightning2', 'Herring',\n                       'Lightning7']\n\ntarget_med_datasets = ['Earthquakes', 'Haptics', 'Computers', 'DistalPhalanxTW', 'DistalPhalanxOutlineAgeGroup',\n                       'MiddlePhalanxTW', 'MiddlePhalanxOutlineAgeGroup',\n                       'SyntheticControl', 'ProximalPhalanxTW', 'ProximalPhalanxOutlineAgeGroup',\n                       'SonyAIBORobotSurface1', 'InlineSkate', 'EOGVerticalSignal', 'EOGHorizontalSignal',\n                       'SmallKitchenAppliances']\n\ntarget_max_datasets = ['MoteStrain', 'HandOutlines', 'CinCECGTorso', 'Phoneme', 'InsectWingbeatSound', 'FacesUCR',\n                       'FaceAll',\n                       'Mallat', 'MixedShapesSmallTrain', 'PhalangesOutlinesCorrect', 'FreezerSmallTrain',\n                       'MixedShapesRegularTrain', 'FreezerRegularTrain', 'Yoga', 'MelbournePedestrian']\n\ntarget_datasets = target_min_datasets + target_med_datasets + target_max_datasets\nprint(target_datasets)\nprint(len(source_datasets), len(target_datasets))\n\ni = 0\nfor dataset in source_datasets:  ## cls pretrain\n    print(\"i = \", i, \"dataset_name = \", dataset)\n    i = i + 1\n    '''\n    python train.py --backbone fcn --task classification --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Coffee --mode pretrain --epoch 20  --loss cross_entropy --cuda cuda:1;\n    '''\n    with 
open('/SSD/lz/time_tsm/scripts/transfer_pretrain_finetune.sh', 'a') as f:\n        f.write(\n            'python train.py --backbone fcn --task classification --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single '\n            '--dataset ' + dataset\n            + ' --mode pretrain --epoch 2000 --classifier linear' +\n            ' --loss cross_entropy --cuda cuda:1' + ';\\n')\n\ni = 0\nfor dataset in source_datasets:  ## rec fcn pretrain\n    print(\"i = \", i, \"dataset_name = \", dataset)\n    i = i + 1\n    '''\n      python train.py --backbone fcn --task reconstruction --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Coffee --mode pretrain --epoch 20  --loss reconstruction --decoder_backbone fcn --cuda cuda:1;\n    '''\n    with open('/SSD/lz/time_tsm/scripts/transfer_pretrain_finetune.sh', 'a') as f:\n        f.write(\n            'python train.py --backbone fcn --task reconstruction --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single '\n            '--dataset ' + dataset\n            + ' --mode pretrain --epoch 2000 --classifier linear' +\n            ' --loss reconstruction --decoder_backbone fcn --cuda cuda:1' + ';\\n')\n\ni = 0\nfor dataset in source_datasets:  ## rec rnn pretrain\n    print(\"i = \", i, \"dataset_name = \", dataset)\n    i = i + 1\n\n    '''\n     python train.py --backbone fcn --task reconstruction --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Coffee --mode pretrain --epoch 20  --loss reconstruction --decoder_backbone rnn --cuda cuda:1;\n    '''\n    with open('/SSD/lz/time_tsm/scripts/transfer_pretrain_finetune.sh', 'a') as f:\n        f.write(\n            'python train.py --backbone fcn --task reconstruction --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single '\n            '--dataset ' + dataset\n            + ' --mode 
pretrain --epoch 2000 --classifier linear' +\n            ' --loss reconstruction --decoder_backbone rnn --cuda cuda:1' + ';\\n')\n\ni = 0\nfor source_dataset in source_datasets:  ## cls finetune\n    print(\"i = \", i, \"dataset_name = \", source_dataset)\n    i = i + 1\n    for target_dataset in target_datasets:\n        ### finetune cls\n        '''\n         python train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Coffee --mode finetune --epoch 20  --loss cross_entropy --source_dataset Coffee --transfer_strategy classification --cuda cuda:1 --save_csv_name test_fcn_nonlin_single_norm_0409_;\n        '''\n        with open('/SSD/lz/time_tsm/scripts/transfer_pretrain_finetune.sh', 'a') as f:\n            f.write(\n                'python train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single '\n                '--dataset ' + target_dataset\n                + ' --mode finetune --epoch 1000 --classifier linear' +\n                ' --loss cross_entropy --source_dataset ' + source_dataset + ' --transfer_strategy classification '\n                                                                             '--cuda cuda:1 --save_csv_name ' + source_dataset + '_finetune_cls_0409_' + ';\\n')\n\ni = 0\nfor source_dataset in source_datasets:  ## rec fcn finetune\n    print(\"i = \", i, \"dataset_name = \", source_dataset)\n    i = i + 1\n    for target_dataset in target_datasets:\n        ### finetune rec fcn\n        '''\n        python train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Coffee --mode finetune --epoch 20  --loss cross_entropy --decoder_backbone fcn --source_dataset Coffee --transfer_strategy 
reconstruction --cuda cuda:1 --save_csv_name test_fcn_nonlin_single_norm_0409_;\n        '''\n        with open('/SSD/lz/time_tsm/scripts/transfer_pretrain_finetune.sh', 'a') as f:\n            f.write(\n                'python train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single '\n                '--dataset ' + target_dataset\n                + ' --mode finetune --epoch 1000 --classifier linear' +\n                ' --loss cross_entropy --decoder_backbone fcn --source_dataset ' + source_dataset + ' --transfer_strategy reconstruction '\n                                                                                                    '--cuda cuda:1 --save_csv_name ' + source_dataset + '_finetune_rec_fcn_0409_' + ';\\n')\n\ni = 0\nfor source_dataset in source_datasets:  ## rec rnn finetune\n    print(\"i = \", i, \"dataset_name = \", source_dataset)\n    i = i + 1\n    for target_dataset in target_datasets:\n        ### finetune rec rnn\n        '''\n        python train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Coffee --mode finetune --epoch 20  --loss cross_entropy --decoder_backbone rnn --source_dataset Coffee --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name test_fcn_nonlin_single_norm_0409_;\n        '''\n        with open('/SSD/lz/time_tsm/scripts/transfer_pretrain_finetune.sh', 'a') as f:\n            f.write(\n                'python train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single '\n                '--dataset ' + target_dataset\n                + ' --mode finetune --epoch 1000 --classifier linear' +\n                ' --loss 
cross_entropy --decoder_backbone rnn --source_dataset ' + source_dataset + ' --transfer_strategy reconstruction '\n                                                                                                    '--cuda cuda:1 --save_csv_name ' + source_dataset + '_finetune_rec_rnn_0409_' + ';\\n')\n\n## nohup ./scripts/transfer_pretrain_finetune.sh &\n"
  },
  {
    "path": "ts_classification_methods/scripts/transfer_pretrain_finetune.sh",
    "content": "python train.py --backbone fcn --task classification --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Crop --mode pretrain --epoch 2000 --classifier linear --loss cross_entropy --cuda cuda:1;\npython train.py --backbone fcn --task classification --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ElectricDevices --mode pretrain --epoch 2000 --classifier linear --loss cross_entropy --cuda cuda:1;\npython train.py --backbone fcn --task classification --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset StarLightCurves --mode pretrain --epoch 2000 --classifier linear --loss cross_entropy --cuda cuda:1;\npython train.py --backbone fcn --task classification --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Wafer --mode pretrain --epoch 2000 --classifier linear --loss cross_entropy --cuda cuda:1;\npython train.py --backbone fcn --task classification --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ECG5000 --mode pretrain --epoch 2000 --classifier linear --loss cross_entropy --cuda cuda:1;\npython train.py --backbone fcn --task classification --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset TwoPatterns --mode pretrain --epoch 2000 --classifier linear --loss cross_entropy --cuda cuda:1;\npython train.py --backbone fcn --task classification --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FordA --mode pretrain --epoch 2000 --classifier linear --loss cross_entropy --cuda cuda:1;\npython train.py --backbone fcn --task classification --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset UWaveGestureLibraryAll --mode pretrain --epoch 2000 --classifier 
linear --loss cross_entropy --cuda cuda:1;\npython train.py --backbone fcn --task classification --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset UWaveGestureLibraryX --mode pretrain --epoch 2000 --classifier linear --loss cross_entropy --cuda cuda:1;\npython train.py --backbone fcn --task classification --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset UWaveGestureLibraryY --mode pretrain --epoch 2000 --classifier linear --loss cross_entropy --cuda cuda:1;\npython train.py --backbone fcn --task classification --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset UWaveGestureLibraryZ --mode pretrain --epoch 2000 --classifier linear --loss cross_entropy --cuda cuda:1;\npython train.py --backbone fcn --task classification --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FordB --mode pretrain --epoch 2000 --classifier linear --loss cross_entropy --cuda cuda:1;\npython train.py --backbone fcn --task classification --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ChlorineConcentration --mode pretrain --epoch 2000 --classifier linear --loss cross_entropy --cuda cuda:1;\npython train.py --backbone fcn --task classification --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset NonInvasiveFetalECGThorax1 --mode pretrain --epoch 2000 --classifier linear --loss cross_entropy --cuda cuda:1;\npython train.py --backbone fcn --task classification --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset NonInvasiveFetalECGThorax2 --mode pretrain --epoch 2000 --classifier linear --loss cross_entropy --cuda cuda:1;\npython train.py --backbone fcn --task reconstruction --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single 
--dataroot /SSD/lz/UCRArchive_2018 --dataset Crop --mode pretrain --epoch 2000 --classifier linear --loss reconstruction --decoder_backbone fcn --cuda cuda:1;\npython train.py --backbone fcn --task reconstruction --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ElectricDevices --mode pretrain --epoch 2000 --classifier linear --loss reconstruction --decoder_backbone fcn --cuda cuda:1;\npython train.py --backbone fcn --task reconstruction --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset StarLightCurves --mode pretrain --epoch 2000 --classifier linear --loss reconstruction --decoder_backbone fcn --cuda cuda:1;\npython train.py --backbone fcn --task reconstruction --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Wafer --mode pretrain --epoch 2000 --classifier linear --loss reconstruction --decoder_backbone fcn --cuda cuda:1;\npython train.py --backbone fcn --task reconstruction --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ECG5000 --mode pretrain --epoch 2000 --classifier linear --loss reconstruction --decoder_backbone fcn --cuda cuda:1;\npython train.py --backbone fcn --task reconstruction --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset TwoPatterns --mode pretrain --epoch 2000 --classifier linear --loss reconstruction --decoder_backbone fcn --cuda cuda:1;\npython train.py --backbone fcn --task reconstruction --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FordA --mode pretrain --epoch 2000 --classifier linear --loss reconstruction --decoder_backbone fcn --cuda cuda:1;\npython train.py --backbone fcn --task reconstruction --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset UWaveGestureLibraryAll 
--mode pretrain --epoch 2000 --classifier linear --loss reconstruction --decoder_backbone fcn --cuda cuda:1;\npython train.py --backbone fcn --task reconstruction --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset UWaveGestureLibraryX --mode pretrain --epoch 2000 --classifier linear --loss reconstruction --decoder_backbone fcn --cuda cuda:1;\npython train.py --backbone fcn --task reconstruction --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset UWaveGestureLibraryY --mode pretrain --epoch 2000 --classifier linear --loss reconstruction --decoder_backbone fcn --cuda cuda:1;\npython train.py --backbone fcn --task reconstruction --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset UWaveGestureLibraryZ --mode pretrain --epoch 2000 --classifier linear --loss reconstruction --decoder_backbone fcn --cuda cuda:1;\npython train.py --backbone fcn --task reconstruction --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FordB --mode pretrain --epoch 2000 --classifier linear --loss reconstruction --decoder_backbone fcn --cuda cuda:1;\npython train.py --backbone fcn --task reconstruction --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ChlorineConcentration --mode pretrain --epoch 2000 --classifier linear --loss reconstruction --decoder_backbone fcn --cuda cuda:1;\npython train.py --backbone fcn --task reconstruction --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset NonInvasiveFetalECGThorax1 --mode pretrain --epoch 2000 --classifier linear --loss reconstruction --decoder_backbone fcn --cuda cuda:1;\npython train.py --backbone fcn --task reconstruction --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset 
NonInvasiveFetalECGThorax2 --mode pretrain --epoch 2000 --classifier linear --loss reconstruction --decoder_backbone fcn --cuda cuda:1;\npython train.py --backbone fcn --task reconstruction --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Crop --mode pretrain --epoch 2000 --classifier linear --loss reconstruction --decoder_backbone rnn --cuda cuda:1;\npython train.py --backbone fcn --task reconstruction --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ElectricDevices --mode pretrain --epoch 2000 --classifier linear --loss reconstruction --decoder_backbone rnn --cuda cuda:1;\npython train.py --backbone fcn --task reconstruction --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset StarLightCurves --mode pretrain --epoch 2000 --classifier linear --loss reconstruction --decoder_backbone rnn --cuda cuda:1;\npython train.py --backbone fcn --task reconstruction --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Wafer --mode pretrain --epoch 2000 --classifier linear --loss reconstruction --decoder_backbone rnn --cuda cuda:1;\npython train.py --backbone fcn --task reconstruction --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ECG5000 --mode pretrain --epoch 2000 --classifier linear --loss reconstruction --decoder_backbone rnn --cuda cuda:1;\npython train.py --backbone fcn --task reconstruction --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset TwoPatterns --mode pretrain --epoch 2000 --classifier linear --loss reconstruction --decoder_backbone rnn --cuda cuda:1;\npython train.py --backbone fcn --task reconstruction --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FordA --mode pretrain --epoch 2000 --classifier 
linear --loss reconstruction --decoder_backbone rnn --cuda cuda:1;\npython train.py --backbone fcn --task reconstruction --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset UWaveGestureLibraryAll --mode pretrain --epoch 2000 --classifier linear --loss reconstruction --decoder_backbone rnn --cuda cuda:1;\npython train.py --backbone fcn --task reconstruction --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset UWaveGestureLibraryX --mode pretrain --epoch 2000 --classifier linear --loss reconstruction --decoder_backbone rnn --cuda cuda:1;\npython train.py --backbone fcn --task reconstruction --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset UWaveGestureLibraryY --mode pretrain --epoch 2000 --classifier linear --loss reconstruction --decoder_backbone rnn --cuda cuda:1;\npython train.py --backbone fcn --task reconstruction --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset UWaveGestureLibraryZ --mode pretrain --epoch 2000 --classifier linear --loss reconstruction --decoder_backbone rnn --cuda cuda:1;\npython train.py --backbone fcn --task reconstruction --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FordB --mode pretrain --epoch 2000 --classifier linear --loss reconstruction --decoder_backbone rnn --cuda cuda:1;\npython train.py --backbone fcn --task reconstruction --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ChlorineConcentration --mode pretrain --epoch 2000 --classifier linear --loss reconstruction --decoder_backbone rnn --cuda cuda:1;\npython train.py --backbone fcn --task reconstruction --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset NonInvasiveFetalECGThorax1 --mode pretrain --epoch 2000 
--classifier linear --loss reconstruction --decoder_backbone rnn --cuda cuda:1;\npython train.py --backbone fcn --task reconstruction --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset NonInvasiveFetalECGThorax2 --mode pretrain --epoch 2000 --classifier linear --loss reconstruction --decoder_backbone rnn --cuda cuda:1;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BirdChicken --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Crop --transfer_strategy classification --cuda cuda:1 --save_csv_name Crop_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BeetleFly --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Crop --transfer_strategy classification --cuda cuda:1 --save_csv_name Crop_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Coffee --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Crop --transfer_strategy classification --cuda cuda:1 --save_csv_name Crop_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset OliveOil --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Crop --transfer_strategy classification --cuda cuda:1 --save_csv_name Crop_finetune_cls_0409_;\npython train.py --backbone fcn --task classification 
--classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Beef --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Crop --transfer_strategy classification --cuda cuda:1 --save_csv_name Crop_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Rock --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Crop --transfer_strategy classification --cuda cuda:1 --save_csv_name Crop_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ShakeGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Crop --transfer_strategy classification --cuda cuda:1 --save_csv_name Crop_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PickupGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Crop --transfer_strategy classification --cuda cuda:1 --save_csv_name Crop_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Wine --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Crop --transfer_strategy classification --cuda cuda:1 --save_csv_name Crop_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear 
--classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceFour --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Crop --transfer_strategy classification --cuda cuda:1 --save_csv_name Crop_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Meat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Crop --transfer_strategy classification --cuda cuda:1 --save_csv_name Crop_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Car --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Crop --transfer_strategy classification --cuda cuda:1 --save_csv_name Crop_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning2 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Crop --transfer_strategy classification --cuda cuda:1 --save_csv_name Crop_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Herring --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Crop --transfer_strategy classification --cuda cuda:1 --save_csv_name Crop_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot 
/SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning7 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Crop --transfer_strategy classification --cuda cuda:1 --save_csv_name Crop_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Earthquakes --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Crop --transfer_strategy classification --cuda cuda:1 --save_csv_name Crop_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Haptics --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Crop --transfer_strategy classification --cuda cuda:1 --save_csv_name Crop_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Computers --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Crop --transfer_strategy classification --cuda cuda:1 --save_csv_name Crop_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Crop --transfer_strategy classification --cuda cuda:1 --save_csv_name Crop_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 
--normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Crop --transfer_strategy classification --cuda cuda:1 --save_csv_name Crop_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Crop --transfer_strategy classification --cuda cuda:1 --save_csv_name Crop_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Crop --transfer_strategy classification --cuda cuda:1 --save_csv_name Crop_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SyntheticControl --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Crop --transfer_strategy classification --cuda cuda:1 --save_csv_name Crop_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Crop --transfer_strategy classification --cuda cuda:1 --save_csv_name Crop_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot 
/SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Crop --transfer_strategy classification --cuda cuda:1 --save_csv_name Crop_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SonyAIBORobotSurface1 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Crop --transfer_strategy classification --cuda cuda:1 --save_csv_name Crop_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InlineSkate --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Crop --transfer_strategy classification --cuda cuda:1 --save_csv_name Crop_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGVerticalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Crop --transfer_strategy classification --cuda cuda:1 --save_csv_name Crop_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGHorizontalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Crop --transfer_strategy classification --cuda cuda:1 --save_csv_name Crop_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 
128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SmallKitchenAppliances --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Crop --transfer_strategy classification --cuda cuda:1 --save_csv_name Crop_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MoteStrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Crop --transfer_strategy classification --cuda cuda:1 --save_csv_name Crop_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset HandOutlines --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Crop --transfer_strategy classification --cuda cuda:1 --save_csv_name Crop_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset CinCECGTorso --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Crop --transfer_strategy classification --cuda cuda:1 --save_csv_name Crop_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Phoneme --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Crop --transfer_strategy classification --cuda cuda:1 --save_csv_name Crop_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot 
/SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InsectWingbeatSound --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Crop --transfer_strategy classification --cuda cuda:1 --save_csv_name Crop_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FacesUCR --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Crop --transfer_strategy classification --cuda cuda:1 --save_csv_name Crop_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceAll --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Crop --transfer_strategy classification --cuda cuda:1 --save_csv_name Crop_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Mallat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Crop --transfer_strategy classification --cuda cuda:1 --save_csv_name Crop_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Crop --transfer_strategy classification --cuda cuda:1 --save_csv_name Crop_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 
--normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PhalangesOutlinesCorrect --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Crop --transfer_strategy classification --cuda cuda:1 --save_csv_name Crop_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Crop --transfer_strategy classification --cuda cuda:1 --save_csv_name Crop_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Crop --transfer_strategy classification --cuda cuda:1 --save_csv_name Crop_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Crop --transfer_strategy classification --cuda cuda:1 --save_csv_name Crop_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Yoga --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Crop --transfer_strategy classification --cuda cuda:1 --save_csv_name Crop_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot 
/SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MelbournePedestrian --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Crop --transfer_strategy classification --cuda cuda:1 --save_csv_name Crop_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BirdChicken --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ElectricDevices --transfer_strategy classification --cuda cuda:1 --save_csv_name ElectricDevices_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BeetleFly --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ElectricDevices --transfer_strategy classification --cuda cuda:1 --save_csv_name ElectricDevices_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Coffee --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ElectricDevices --transfer_strategy classification --cuda cuda:1 --save_csv_name ElectricDevices_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset OliveOil --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ElectricDevices --transfer_strategy classification --cuda cuda:1 --save_csv_name ElectricDevices_finetune_cls_0409_;\npython train.py --backbone fcn --task 
classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Beef --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ElectricDevices --transfer_strategy classification --cuda cuda:1 --save_csv_name ElectricDevices_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Rock --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ElectricDevices --transfer_strategy classification --cuda cuda:1 --save_csv_name ElectricDevices_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ShakeGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ElectricDevices --transfer_strategy classification --cuda cuda:1 --save_csv_name ElectricDevices_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PickupGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ElectricDevices --transfer_strategy classification --cuda cuda:1 --save_csv_name ElectricDevices_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Wine --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ElectricDevices --transfer_strategy classification --cuda cuda:1 
--save_csv_name ElectricDevices_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceFour --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ElectricDevices --transfer_strategy classification --cuda cuda:1 --save_csv_name ElectricDevices_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Meat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ElectricDevices --transfer_strategy classification --cuda cuda:1 --save_csv_name ElectricDevices_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Car --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ElectricDevices --transfer_strategy classification --cuda cuda:1 --save_csv_name ElectricDevices_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning2 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ElectricDevices --transfer_strategy classification --cuda cuda:1 --save_csv_name ElectricDevices_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Herring --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset 
ElectricDevices --transfer_strategy classification --cuda cuda:1 --save_csv_name ElectricDevices_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning7 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ElectricDevices --transfer_strategy classification --cuda cuda:1 --save_csv_name ElectricDevices_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Earthquakes --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ElectricDevices --transfer_strategy classification --cuda cuda:1 --save_csv_name ElectricDevices_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Haptics --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ElectricDevices --transfer_strategy classification --cuda cuda:1 --save_csv_name ElectricDevices_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Computers --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ElectricDevices --transfer_strategy classification --cuda cuda:1 --save_csv_name ElectricDevices_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxTW --mode 
finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ElectricDevices --transfer_strategy classification --cuda cuda:1 --save_csv_name ElectricDevices_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ElectricDevices --transfer_strategy classification --cuda cuda:1 --save_csv_name ElectricDevices_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ElectricDevices --transfer_strategy classification --cuda cuda:1 --save_csv_name ElectricDevices_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ElectricDevices --transfer_strategy classification --cuda cuda:1 --save_csv_name ElectricDevices_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SyntheticControl --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ElectricDevices --transfer_strategy classification --cuda cuda:1 --save_csv_name ElectricDevices_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 
128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ElectricDevices --transfer_strategy classification --cuda cuda:1 --save_csv_name ElectricDevices_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ElectricDevices --transfer_strategy classification --cuda cuda:1 --save_csv_name ElectricDevices_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SonyAIBORobotSurface1 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ElectricDevices --transfer_strategy classification --cuda cuda:1 --save_csv_name ElectricDevices_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InlineSkate --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ElectricDevices --transfer_strategy classification --cuda cuda:1 --save_csv_name ElectricDevices_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGVerticalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ElectricDevices --transfer_strategy classification --cuda cuda:1 --save_csv_name 
ElectricDevices_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGHorizontalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ElectricDevices --transfer_strategy classification --cuda cuda:1 --save_csv_name ElectricDevices_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SmallKitchenAppliances --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ElectricDevices --transfer_strategy classification --cuda cuda:1 --save_csv_name ElectricDevices_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MoteStrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ElectricDevices --transfer_strategy classification --cuda cuda:1 --save_csv_name ElectricDevices_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset HandOutlines --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ElectricDevices --transfer_strategy classification --cuda cuda:1 --save_csv_name ElectricDevices_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset CinCECGTorso --mode finetune --epoch 1000 --classifier linear --loss 
cross_entropy --source_dataset ElectricDevices --transfer_strategy classification --cuda cuda:1 --save_csv_name ElectricDevices_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Phoneme --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ElectricDevices --transfer_strategy classification --cuda cuda:1 --save_csv_name ElectricDevices_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InsectWingbeatSound --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ElectricDevices --transfer_strategy classification --cuda cuda:1 --save_csv_name ElectricDevices_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FacesUCR --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ElectricDevices --transfer_strategy classification --cuda cuda:1 --save_csv_name ElectricDevices_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceAll --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ElectricDevices --transfer_strategy classification --cuda cuda:1 --save_csv_name ElectricDevices_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 
--dataset Mallat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ElectricDevices --transfer_strategy classification --cuda cuda:1 --save_csv_name ElectricDevices_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ElectricDevices --transfer_strategy classification --cuda cuda:1 --save_csv_name ElectricDevices_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PhalangesOutlinesCorrect --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ElectricDevices --transfer_strategy classification --cuda cuda:1 --save_csv_name ElectricDevices_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ElectricDevices --transfer_strategy classification --cuda cuda:1 --save_csv_name ElectricDevices_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ElectricDevices --transfer_strategy classification --cuda cuda:1 --save_csv_name ElectricDevices_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear 
--classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ElectricDevices --transfer_strategy classification --cuda cuda:1 --save_csv_name ElectricDevices_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Yoga --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ElectricDevices --transfer_strategy classification --cuda cuda:1 --save_csv_name ElectricDevices_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MelbournePedestrian --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ElectricDevices --transfer_strategy classification --cuda cuda:1 --save_csv_name ElectricDevices_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BirdChicken --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset StarLightCurves --transfer_strategy classification --cuda cuda:1 --save_csv_name StarLightCurves_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BeetleFly --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset StarLightCurves --transfer_strategy classification --cuda cuda:1 --save_csv_name 
StarLightCurves_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Coffee --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset StarLightCurves --transfer_strategy classification --cuda cuda:1 --save_csv_name StarLightCurves_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset OliveOil --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset StarLightCurves --transfer_strategy classification --cuda cuda:1 --save_csv_name StarLightCurves_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Beef --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset StarLightCurves --transfer_strategy classification --cuda cuda:1 --save_csv_name StarLightCurves_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Rock --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset StarLightCurves --transfer_strategy classification --cuda cuda:1 --save_csv_name StarLightCurves_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ShakeGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset 
StarLightCurves --transfer_strategy classification --cuda cuda:1 --save_csv_name StarLightCurves_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PickupGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset StarLightCurves --transfer_strategy classification --cuda cuda:1 --save_csv_name StarLightCurves_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Wine --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset StarLightCurves --transfer_strategy classification --cuda cuda:1 --save_csv_name StarLightCurves_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceFour --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset StarLightCurves --transfer_strategy classification --cuda cuda:1 --save_csv_name StarLightCurves_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Meat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset StarLightCurves --transfer_strategy classification --cuda cuda:1 --save_csv_name StarLightCurves_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Car --mode finetune 
--epoch 1000 --classifier linear --loss cross_entropy --source_dataset StarLightCurves --transfer_strategy classification --cuda cuda:1 --save_csv_name StarLightCurves_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning2 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset StarLightCurves --transfer_strategy classification --cuda cuda:1 --save_csv_name StarLightCurves_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Herring --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset StarLightCurves --transfer_strategy classification --cuda cuda:1 --save_csv_name StarLightCurves_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning7 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset StarLightCurves --transfer_strategy classification --cuda cuda:1 --save_csv_name StarLightCurves_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Earthquakes --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset StarLightCurves --transfer_strategy classification --cuda cuda:1 --save_csv_name StarLightCurves_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way 
single --dataroot /SSD/lz/UCRArchive_2018 --dataset Haptics --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset StarLightCurves --transfer_strategy classification --cuda cuda:1 --save_csv_name StarLightCurves_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Computers --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset StarLightCurves --transfer_strategy classification --cuda cuda:1 --save_csv_name StarLightCurves_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset StarLightCurves --transfer_strategy classification --cuda cuda:1 --save_csv_name StarLightCurves_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset StarLightCurves --transfer_strategy classification --cuda cuda:1 --save_csv_name StarLightCurves_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset StarLightCurves --transfer_strategy classification --cuda cuda:1 --save_csv_name StarLightCurves_finetune_cls_0409_;\npython train.py --backbone fcn --task 
classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset StarLightCurves --transfer_strategy classification --cuda cuda:1 --save_csv_name StarLightCurves_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SyntheticControl --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset StarLightCurves --transfer_strategy classification --cuda cuda:1 --save_csv_name StarLightCurves_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset StarLightCurves --transfer_strategy classification --cuda cuda:1 --save_csv_name StarLightCurves_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset StarLightCurves --transfer_strategy classification --cuda cuda:1 --save_csv_name StarLightCurves_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SonyAIBORobotSurface1 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset StarLightCurves 
--transfer_strategy classification --cuda cuda:1 --save_csv_name StarLightCurves_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InlineSkate --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset StarLightCurves --transfer_strategy classification --cuda cuda:1 --save_csv_name StarLightCurves_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGVerticalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset StarLightCurves --transfer_strategy classification --cuda cuda:1 --save_csv_name StarLightCurves_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGHorizontalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset StarLightCurves --transfer_strategy classification --cuda cuda:1 --save_csv_name StarLightCurves_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SmallKitchenAppliances --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset StarLightCurves --transfer_strategy classification --cuda cuda:1 --save_csv_name StarLightCurves_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset 
MoteStrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset StarLightCurves --transfer_strategy classification --cuda cuda:1 --save_csv_name StarLightCurves_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset HandOutlines --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset StarLightCurves --transfer_strategy classification --cuda cuda:1 --save_csv_name StarLightCurves_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset CinCECGTorso --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset StarLightCurves --transfer_strategy classification --cuda cuda:1 --save_csv_name StarLightCurves_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Phoneme --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset StarLightCurves --transfer_strategy classification --cuda cuda:1 --save_csv_name StarLightCurves_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InsectWingbeatSound --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset StarLightCurves --transfer_strategy classification --cuda cuda:1 --save_csv_name StarLightCurves_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot 
/SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FacesUCR --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset StarLightCurves --transfer_strategy classification --cuda cuda:1 --save_csv_name StarLightCurves_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceAll --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset StarLightCurves --transfer_strategy classification --cuda cuda:1 --save_csv_name StarLightCurves_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Mallat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset StarLightCurves --transfer_strategy classification --cuda cuda:1 --save_csv_name StarLightCurves_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset StarLightCurves --transfer_strategy classification --cuda cuda:1 --save_csv_name StarLightCurves_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PhalangesOutlinesCorrect --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset StarLightCurves --transfer_strategy classification --cuda cuda:1 --save_csv_name StarLightCurves_finetune_cls_0409_;\npython train.py 
--backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset StarLightCurves --transfer_strategy classification --cuda cuda:1 --save_csv_name StarLightCurves_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset StarLightCurves --transfer_strategy classification --cuda cuda:1 --save_csv_name StarLightCurves_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset StarLightCurves --transfer_strategy classification --cuda cuda:1 --save_csv_name StarLightCurves_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Yoga --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset StarLightCurves --transfer_strategy classification --cuda cuda:1 --save_csv_name StarLightCurves_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MelbournePedestrian --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset StarLightCurves 
--transfer_strategy classification --cuda cuda:1 --save_csv_name StarLightCurves_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BirdChicken --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Wafer --transfer_strategy classification --cuda cuda:1 --save_csv_name Wafer_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BeetleFly --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Wafer --transfer_strategy classification --cuda cuda:1 --save_csv_name Wafer_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Coffee --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Wafer --transfer_strategy classification --cuda cuda:1 --save_csv_name Wafer_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset OliveOil --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Wafer --transfer_strategy classification --cuda cuda:1 --save_csv_name Wafer_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Beef --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Wafer --transfer_strategy 
classification --cuda cuda:1 --save_csv_name Wafer_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Rock --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Wafer --transfer_strategy classification --cuda cuda:1 --save_csv_name Wafer_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ShakeGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Wafer --transfer_strategy classification --cuda cuda:1 --save_csv_name Wafer_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PickupGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Wafer --transfer_strategy classification --cuda cuda:1 --save_csv_name Wafer_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Wine --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Wafer --transfer_strategy classification --cuda cuda:1 --save_csv_name Wafer_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceFour --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Wafer --transfer_strategy classification 
--cuda cuda:1 --save_csv_name Wafer_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Meat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Wafer --transfer_strategy classification --cuda cuda:1 --save_csv_name Wafer_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Car --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Wafer --transfer_strategy classification --cuda cuda:1 --save_csv_name Wafer_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning2 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Wafer --transfer_strategy classification --cuda cuda:1 --save_csv_name Wafer_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Herring --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Wafer --transfer_strategy classification --cuda cuda:1 --save_csv_name Wafer_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning7 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Wafer --transfer_strategy classification --cuda cuda:1 --save_csv_name 
Wafer_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Earthquakes --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Wafer --transfer_strategy classification --cuda cuda:1 --save_csv_name Wafer_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Haptics --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Wafer --transfer_strategy classification --cuda cuda:1 --save_csv_name Wafer_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Computers --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Wafer --transfer_strategy classification --cuda cuda:1 --save_csv_name Wafer_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Wafer --transfer_strategy classification --cuda cuda:1 --save_csv_name Wafer_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Wafer --transfer_strategy classification --cuda cuda:1 --save_csv_name 
Wafer_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Wafer --transfer_strategy classification --cuda cuda:1 --save_csv_name Wafer_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Wafer --transfer_strategy classification --cuda cuda:1 --save_csv_name Wafer_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SyntheticControl --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Wafer --transfer_strategy classification --cuda cuda:1 --save_csv_name Wafer_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Wafer --transfer_strategy classification --cuda cuda:1 --save_csv_name Wafer_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Wafer --transfer_strategy 
classification --cuda cuda:1 --save_csv_name Wafer_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SonyAIBORobotSurface1 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Wafer --transfer_strategy classification --cuda cuda:1 --save_csv_name Wafer_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InlineSkate --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Wafer --transfer_strategy classification --cuda cuda:1 --save_csv_name Wafer_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGVerticalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Wafer --transfer_strategy classification --cuda cuda:1 --save_csv_name Wafer_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGHorizontalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Wafer --transfer_strategy classification --cuda cuda:1 --save_csv_name Wafer_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SmallKitchenAppliances --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Wafer 
--transfer_strategy classification --cuda cuda:1 --save_csv_name Wafer_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MoteStrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Wafer --transfer_strategy classification --cuda cuda:1 --save_csv_name Wafer_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset HandOutlines --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Wafer --transfer_strategy classification --cuda cuda:1 --save_csv_name Wafer_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset CinCECGTorso --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Wafer --transfer_strategy classification --cuda cuda:1 --save_csv_name Wafer_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Phoneme --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Wafer --transfer_strategy classification --cuda cuda:1 --save_csv_name Wafer_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InsectWingbeatSound --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Wafer 
--transfer_strategy classification --cuda cuda:1 --save_csv_name Wafer_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FacesUCR --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Wafer --transfer_strategy classification --cuda cuda:1 --save_csv_name Wafer_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceAll --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Wafer --transfer_strategy classification --cuda cuda:1 --save_csv_name Wafer_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Mallat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Wafer --transfer_strategy classification --cuda cuda:1 --save_csv_name Wafer_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Wafer --transfer_strategy classification --cuda cuda:1 --save_csv_name Wafer_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PhalangesOutlinesCorrect --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Wafer 
--transfer_strategy classification --cuda cuda:1 --save_csv_name Wafer_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Wafer --transfer_strategy classification --cuda cuda:1 --save_csv_name Wafer_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Wafer --transfer_strategy classification --cuda cuda:1 --save_csv_name Wafer_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Wafer --transfer_strategy classification --cuda cuda:1 --save_csv_name Wafer_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Yoga --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset Wafer --transfer_strategy classification --cuda cuda:1 --save_csv_name Wafer_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MelbournePedestrian --mode finetune --epoch 1000 --classifier linear --loss cross_entropy 
--source_dataset Wafer --transfer_strategy classification --cuda cuda:1 --save_csv_name Wafer_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BirdChicken --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ECG5000 --transfer_strategy classification --cuda cuda:1 --save_csv_name ECG5000_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BeetleFly --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ECG5000 --transfer_strategy classification --cuda cuda:1 --save_csv_name ECG5000_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Coffee --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ECG5000 --transfer_strategy classification --cuda cuda:1 --save_csv_name ECG5000_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset OliveOil --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ECG5000 --transfer_strategy classification --cuda cuda:1 --save_csv_name ECG5000_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Beef --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset 
ECG5000 --transfer_strategy classification --cuda cuda:1 --save_csv_name ECG5000_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Rock --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ECG5000 --transfer_strategy classification --cuda cuda:1 --save_csv_name ECG5000_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ShakeGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ECG5000 --transfer_strategy classification --cuda cuda:1 --save_csv_name ECG5000_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PickupGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ECG5000 --transfer_strategy classification --cuda cuda:1 --save_csv_name ECG5000_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Wine --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ECG5000 --transfer_strategy classification --cuda cuda:1 --save_csv_name ECG5000_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceFour --mode finetune --epoch 1000 --classifier linear --loss cross_entropy 
--source_dataset ECG5000 --transfer_strategy classification --cuda cuda:1 --save_csv_name ECG5000_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Meat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ECG5000 --transfer_strategy classification --cuda cuda:1 --save_csv_name ECG5000_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Car --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ECG5000 --transfer_strategy classification --cuda cuda:1 --save_csv_name ECG5000_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning2 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ECG5000 --transfer_strategy classification --cuda cuda:1 --save_csv_name ECG5000_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Herring --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ECG5000 --transfer_strategy classification --cuda cuda:1 --save_csv_name ECG5000_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning7 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset 
ECG5000 --transfer_strategy classification --cuda cuda:1 --save_csv_name ECG5000_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Earthquakes --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ECG5000 --transfer_strategy classification --cuda cuda:1 --save_csv_name ECG5000_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Haptics --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ECG5000 --transfer_strategy classification --cuda cuda:1 --save_csv_name ECG5000_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Computers --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ECG5000 --transfer_strategy classification --cuda cuda:1 --save_csv_name ECG5000_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ECG5000 --transfer_strategy classification --cuda cuda:1 --save_csv_name ECG5000_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy 
--source_dataset ECG5000 --transfer_strategy classification --cuda cuda:1 --save_csv_name ECG5000_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ECG5000 --transfer_strategy classification --cuda cuda:1 --save_csv_name ECG5000_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ECG5000 --transfer_strategy classification --cuda cuda:1 --save_csv_name ECG5000_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SyntheticControl --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ECG5000 --transfer_strategy classification --cuda cuda:1 --save_csv_name ECG5000_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ECG5000 --transfer_strategy classification --cuda cuda:1 --save_csv_name ECG5000_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxOutlineAgeGroup --mode finetune 
--epoch 1000 --classifier linear --loss cross_entropy --source_dataset ECG5000 --transfer_strategy classification --cuda cuda:1 --save_csv_name ECG5000_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SonyAIBORobotSurface1 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ECG5000 --transfer_strategy classification --cuda cuda:1 --save_csv_name ECG5000_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InlineSkate --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ECG5000 --transfer_strategy classification --cuda cuda:1 --save_csv_name ECG5000_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGVerticalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ECG5000 --transfer_strategy classification --cuda cuda:1 --save_csv_name ECG5000_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGHorizontalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ECG5000 --transfer_strategy classification --cuda cuda:1 --save_csv_name ECG5000_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset 
SmallKitchenAppliances --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ECG5000 --transfer_strategy classification --cuda cuda:1 --save_csv_name ECG5000_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MoteStrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ECG5000 --transfer_strategy classification --cuda cuda:1 --save_csv_name ECG5000_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset HandOutlines --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ECG5000 --transfer_strategy classification --cuda cuda:1 --save_csv_name ECG5000_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset CinCECGTorso --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ECG5000 --transfer_strategy classification --cuda cuda:1 --save_csv_name ECG5000_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Phoneme --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ECG5000 --transfer_strategy classification --cuda cuda:1 --save_csv_name ECG5000_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 
--dataset InsectWingbeatSound --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ECG5000 --transfer_strategy classification --cuda cuda:1 --save_csv_name ECG5000_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FacesUCR --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ECG5000 --transfer_strategy classification --cuda cuda:1 --save_csv_name ECG5000_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceAll --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ECG5000 --transfer_strategy classification --cuda cuda:1 --save_csv_name ECG5000_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Mallat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ECG5000 --transfer_strategy classification --cuda cuda:1 --save_csv_name ECG5000_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ECG5000 --transfer_strategy classification --cuda cuda:1 --save_csv_name ECG5000_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot 
/SSD/lz/UCRArchive_2018 --dataset PhalangesOutlinesCorrect --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ECG5000 --transfer_strategy classification --cuda cuda:1 --save_csv_name ECG5000_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ECG5000 --transfer_strategy classification --cuda cuda:1 --save_csv_name ECG5000_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ECG5000 --transfer_strategy classification --cuda cuda:1 --save_csv_name ECG5000_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ECG5000 --transfer_strategy classification --cuda cuda:1 --save_csv_name ECG5000_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Yoga --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ECG5000 --transfer_strategy classification --cuda cuda:1 --save_csv_name ECG5000_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 
--normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MelbournePedestrian --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ECG5000 --transfer_strategy classification --cuda cuda:1 --save_csv_name ECG5000_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BirdChicken --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset TwoPatterns --transfer_strategy classification --cuda cuda:1 --save_csv_name TwoPatterns_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BeetleFly --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset TwoPatterns --transfer_strategy classification --cuda cuda:1 --save_csv_name TwoPatterns_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Coffee --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset TwoPatterns --transfer_strategy classification --cuda cuda:1 --save_csv_name TwoPatterns_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset OliveOil --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset TwoPatterns --transfer_strategy classification --cuda cuda:1 --save_csv_name TwoPatterns_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 
--dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Beef --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset TwoPatterns --transfer_strategy classification --cuda cuda:1 --save_csv_name TwoPatterns_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Rock --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset TwoPatterns --transfer_strategy classification --cuda cuda:1 --save_csv_name TwoPatterns_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ShakeGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset TwoPatterns --transfer_strategy classification --cuda cuda:1 --save_csv_name TwoPatterns_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PickupGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset TwoPatterns --transfer_strategy classification --cuda cuda:1 --save_csv_name TwoPatterns_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Wine --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset TwoPatterns --transfer_strategy classification --cuda cuda:1 --save_csv_name TwoPatterns_finetune_cls_0409_;\npython train.py --backbone fcn --task classification 
--classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceFour --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset TwoPatterns --transfer_strategy classification --cuda cuda:1 --save_csv_name TwoPatterns_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Meat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset TwoPatterns --transfer_strategy classification --cuda cuda:1 --save_csv_name TwoPatterns_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Car --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset TwoPatterns --transfer_strategy classification --cuda cuda:1 --save_csv_name TwoPatterns_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning2 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset TwoPatterns --transfer_strategy classification --cuda cuda:1 --save_csv_name TwoPatterns_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Herring --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset TwoPatterns --transfer_strategy classification --cuda cuda:1 --save_csv_name TwoPatterns_finetune_cls_0409_;\npython train.py --backbone fcn 
--task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning7 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset TwoPatterns --transfer_strategy classification --cuda cuda:1 --save_csv_name TwoPatterns_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Earthquakes --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset TwoPatterns --transfer_strategy classification --cuda cuda:1 --save_csv_name TwoPatterns_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Haptics --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset TwoPatterns --transfer_strategy classification --cuda cuda:1 --save_csv_name TwoPatterns_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Computers --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset TwoPatterns --transfer_strategy classification --cuda cuda:1 --save_csv_name TwoPatterns_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset TwoPatterns --transfer_strategy classification --cuda cuda:1 --save_csv_name 
TwoPatterns_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset TwoPatterns --transfer_strategy classification --cuda cuda:1 --save_csv_name TwoPatterns_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset TwoPatterns --transfer_strategy classification --cuda cuda:1 --save_csv_name TwoPatterns_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset TwoPatterns --transfer_strategy classification --cuda cuda:1 --save_csv_name TwoPatterns_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SyntheticControl --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset TwoPatterns --transfer_strategy classification --cuda cuda:1 --save_csv_name TwoPatterns_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy 
--source_dataset TwoPatterns --transfer_strategy classification --cuda cuda:1 --save_csv_name TwoPatterns_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset TwoPatterns --transfer_strategy classification --cuda cuda:1 --save_csv_name TwoPatterns_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SonyAIBORobotSurface1 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset TwoPatterns --transfer_strategy classification --cuda cuda:1 --save_csv_name TwoPatterns_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InlineSkate --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset TwoPatterns --transfer_strategy classification --cuda cuda:1 --save_csv_name TwoPatterns_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGVerticalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset TwoPatterns --transfer_strategy classification --cuda cuda:1 --save_csv_name TwoPatterns_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset 
EOGHorizontalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset TwoPatterns --transfer_strategy classification --cuda cuda:1 --save_csv_name TwoPatterns_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SmallKitchenAppliances --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset TwoPatterns --transfer_strategy classification --cuda cuda:1 --save_csv_name TwoPatterns_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MoteStrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset TwoPatterns --transfer_strategy classification --cuda cuda:1 --save_csv_name TwoPatterns_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset HandOutlines --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset TwoPatterns --transfer_strategy classification --cuda cuda:1 --save_csv_name TwoPatterns_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset CinCECGTorso --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset TwoPatterns --transfer_strategy classification --cuda cuda:1 --save_csv_name TwoPatterns_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 
--normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Phoneme --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset TwoPatterns --transfer_strategy classification --cuda cuda:1 --save_csv_name TwoPatterns_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InsectWingbeatSound --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset TwoPatterns --transfer_strategy classification --cuda cuda:1 --save_csv_name TwoPatterns_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FacesUCR --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset TwoPatterns --transfer_strategy classification --cuda cuda:1 --save_csv_name TwoPatterns_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceAll --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset TwoPatterns --transfer_strategy classification --cuda cuda:1 --save_csv_name TwoPatterns_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Mallat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset TwoPatterns --transfer_strategy classification --cuda cuda:1 --save_csv_name TwoPatterns_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 
--dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset TwoPatterns --transfer_strategy classification --cuda cuda:1 --save_csv_name TwoPatterns_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PhalangesOutlinesCorrect --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset TwoPatterns --transfer_strategy classification --cuda cuda:1 --save_csv_name TwoPatterns_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset TwoPatterns --transfer_strategy classification --cuda cuda:1 --save_csv_name TwoPatterns_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset TwoPatterns --transfer_strategy classification --cuda cuda:1 --save_csv_name TwoPatterns_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset TwoPatterns --transfer_strategy classification --cuda cuda:1 --save_csv_name TwoPatterns_finetune_cls_0409_;\npython 
train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Yoga --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset TwoPatterns --transfer_strategy classification --cuda cuda:1 --save_csv_name TwoPatterns_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MelbournePedestrian --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset TwoPatterns --transfer_strategy classification --cuda cuda:1 --save_csv_name TwoPatterns_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BirdChicken --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordA --transfer_strategy classification --cuda cuda:1 --save_csv_name FordA_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BeetleFly --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordA --transfer_strategy classification --cuda cuda:1 --save_csv_name FordA_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Coffee --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordA --transfer_strategy classification --cuda cuda:1 --save_csv_name FordA_finetune_cls_0409_;\npython 
train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset OliveOil --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordA --transfer_strategy classification --cuda cuda:1 --save_csv_name FordA_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Beef --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordA --transfer_strategy classification --cuda cuda:1 --save_csv_name FordA_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Rock --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordA --transfer_strategy classification --cuda cuda:1 --save_csv_name FordA_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ShakeGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordA --transfer_strategy classification --cuda cuda:1 --save_csv_name FordA_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PickupGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordA --transfer_strategy classification --cuda cuda:1 --save_csv_name FordA_finetune_cls_0409_;\npython train.py 
--backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Wine --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordA --transfer_strategy classification --cuda cuda:1 --save_csv_name FordA_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceFour --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordA --transfer_strategy classification --cuda cuda:1 --save_csv_name FordA_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Meat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordA --transfer_strategy classification --cuda cuda:1 --save_csv_name FordA_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Car --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordA --transfer_strategy classification --cuda cuda:1 --save_csv_name FordA_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning2 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordA --transfer_strategy classification --cuda cuda:1 --save_csv_name FordA_finetune_cls_0409_;\npython train.py --backbone fcn --task classification 
--classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Herring --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordA --transfer_strategy classification --cuda cuda:1 --save_csv_name FordA_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning7 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordA --transfer_strategy classification --cuda cuda:1 --save_csv_name FordA_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Earthquakes --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordA --transfer_strategy classification --cuda cuda:1 --save_csv_name FordA_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Haptics --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordA --transfer_strategy classification --cuda cuda:1 --save_csv_name FordA_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Computers --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordA --transfer_strategy classification --cuda cuda:1 --save_csv_name FordA_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear 
--classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordA --transfer_strategy classification --cuda cuda:1 --save_csv_name FordA_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordA --transfer_strategy classification --cuda cuda:1 --save_csv_name FordA_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordA --transfer_strategy classification --cuda cuda:1 --save_csv_name FordA_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordA --transfer_strategy classification --cuda cuda:1 --save_csv_name FordA_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SyntheticControl --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordA --transfer_strategy classification --cuda cuda:1 --save_csv_name FordA_finetune_cls_0409_;\npython train.py --backbone fcn --task 
classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordA --transfer_strategy classification --cuda cuda:1 --save_csv_name FordA_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordA --transfer_strategy classification --cuda cuda:1 --save_csv_name FordA_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SonyAIBORobotSurface1 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordA --transfer_strategy classification --cuda cuda:1 --save_csv_name FordA_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InlineSkate --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordA --transfer_strategy classification --cuda cuda:1 --save_csv_name FordA_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGVerticalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordA --transfer_strategy classification --cuda cuda:1 --save_csv_name FordA_finetune_cls_0409_;\npython train.py 
--backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGHorizontalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordA --transfer_strategy classification --cuda cuda:1 --save_csv_name FordA_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SmallKitchenAppliances --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordA --transfer_strategy classification --cuda cuda:1 --save_csv_name FordA_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MoteStrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordA --transfer_strategy classification --cuda cuda:1 --save_csv_name FordA_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset HandOutlines --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordA --transfer_strategy classification --cuda cuda:1 --save_csv_name FordA_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset CinCECGTorso --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordA --transfer_strategy classification --cuda cuda:1 --save_csv_name FordA_finetune_cls_0409_;\npython train.py 
--backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Phoneme --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordA --transfer_strategy classification --cuda cuda:1 --save_csv_name FordA_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InsectWingbeatSound --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordA --transfer_strategy classification --cuda cuda:1 --save_csv_name FordA_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FacesUCR --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordA --transfer_strategy classification --cuda cuda:1 --save_csv_name FordA_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceAll --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordA --transfer_strategy classification --cuda cuda:1 --save_csv_name FordA_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Mallat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordA --transfer_strategy classification --cuda cuda:1 --save_csv_name FordA_finetune_cls_0409_;\npython train.py --backbone fcn --task 
classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordA --transfer_strategy classification --cuda cuda:1 --save_csv_name FordA_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PhalangesOutlinesCorrect --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordA --transfer_strategy classification --cuda cuda:1 --save_csv_name FordA_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordA --transfer_strategy classification --cuda cuda:1 --save_csv_name FordA_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordA --transfer_strategy classification --cuda cuda:1 --save_csv_name FordA_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordA --transfer_strategy classification --cuda cuda:1 --save_csv_name FordA_finetune_cls_0409_;\npython 
train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Yoga --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordA --transfer_strategy classification --cuda cuda:1 --save_csv_name FordA_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MelbournePedestrian --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordA --transfer_strategy classification --cuda cuda:1 --save_csv_name FordA_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BirdChicken --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryAll --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BeetleFly --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryAll --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Coffee --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryAll --transfer_strategy classification 
--cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset OliveOil --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryAll --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Beef --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryAll --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Rock --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryAll --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ShakeGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryAll --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset 
PickupGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryAll --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Wine --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryAll --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceFour --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryAll --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Meat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryAll --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Car --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryAll --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_cls_0409_;\npython train.py --backbone fcn --task classification 
--classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning2 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryAll --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Herring --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryAll --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning7 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryAll --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Earthquakes --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryAll --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Haptics --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryAll --transfer_strategy 
classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Computers --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryAll --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryAll --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryAll --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryAll --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot 
/SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryAll --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SyntheticControl --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryAll --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryAll --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryAll --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SonyAIBORobotSurface1 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryAll --transfer_strategy classification --cuda cuda:1 --save_csv_name 
UWaveGestureLibraryAll_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InlineSkate --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryAll --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGVerticalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryAll --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGHorizontalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryAll --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SmallKitchenAppliances --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryAll --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MoteStrain 
--mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryAll --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset HandOutlines --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryAll --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset CinCECGTorso --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryAll --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Phoneme --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryAll --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InsectWingbeatSound --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryAll --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_cls_0409_;\npython train.py --backbone fcn --task classification 
--classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FacesUCR --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryAll --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceAll --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryAll --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Mallat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryAll --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryAll --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PhalangesOutlinesCorrect --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryAll 
--transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryAll --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryAll --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryAll --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Yoga --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryAll --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way 
single --dataroot /SSD/lz/UCRArchive_2018 --dataset MelbournePedestrian --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryAll --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BirdChicken --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryX --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BeetleFly --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryX --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Coffee --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryX --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset OliveOil --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryX --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_cls_0409_;\npython 
train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Beef --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryX --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Rock --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryX --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ShakeGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryX --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PickupGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryX --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Wine --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset 
UWaveGestureLibraryX --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceFour --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryX --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Meat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryX --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Car --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryX --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning2 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryX --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot 
/SSD/lz/UCRArchive_2018 --dataset Herring --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryX --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning7 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryX --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Earthquakes --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryX --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Haptics --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryX --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Computers --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryX --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_cls_0409_;\npython train.py --backbone fcn --task 
classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryX --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryX --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryX --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryX --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SyntheticControl --mode finetune --epoch 1000 --classifier linear --loss cross_entropy 
--source_dataset UWaveGestureLibraryX --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryX --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryX --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SonyAIBORobotSurface1 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryX --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InlineSkate --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryX --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot 
/SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGVerticalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryX --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGHorizontalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryX --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SmallKitchenAppliances --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryX --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MoteStrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryX --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset HandOutlines --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryX --transfer_strategy classification --cuda cuda:1 
--save_csv_name UWaveGestureLibraryX_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset CinCECGTorso --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryX --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Phoneme --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryX --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InsectWingbeatSound --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryX --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FacesUCR --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryX --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceAll --mode finetune --epoch 1000 
--classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryX --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Mallat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryX --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryX --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PhalangesOutlinesCorrect --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryX --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryX --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear 
--classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryX --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryX --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Yoga --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryX --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MelbournePedestrian --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryX --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BirdChicken --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryY --transfer_strategy 
classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BeetleFly --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryY --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Coffee --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryY --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset OliveOil --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryY --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Beef --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryY --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Rock --mode finetune 
--epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryY --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ShakeGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryY --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PickupGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryY --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Wine --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryY --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceFour --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryY --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear 
--classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Meat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryY --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Car --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryY --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning2 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryY --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Herring --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryY --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning7 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryY --transfer_strategy classification --cuda cuda:1 --save_csv_name 
UWaveGestureLibraryY_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Earthquakes --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryY --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Haptics --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryY --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Computers --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryY --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryY --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxOutlineAgeGroup --mode finetune --epoch 
1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryY --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryY --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryY --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SyntheticControl --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryY --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryY --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear 
--classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryY --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SonyAIBORobotSurface1 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryY --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InlineSkate --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryY --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGVerticalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryY --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGHorizontalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryY 
--transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SmallKitchenAppliances --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryY --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MoteStrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryY --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset HandOutlines --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryY --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset CinCECGTorso --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryY --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot 
/SSD/lz/UCRArchive_2018 --dataset Phoneme --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryY --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InsectWingbeatSound --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryY --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FacesUCR --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryY --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceAll --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryY --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Mallat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryY --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_cls_0409_;\npython train.py --backbone fcn --task 
classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryY --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PhalangesOutlinesCorrect --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryY --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryY --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryY --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy 
--source_dataset UWaveGestureLibraryY --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Yoga --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryY --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MelbournePedestrian --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryY --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BirdChicken --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryZ --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BeetleFly --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryZ --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way 
single --dataroot /SSD/lz/UCRArchive_2018 --dataset Coffee --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryZ --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset OliveOil --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryZ --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Beef --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryZ --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Rock --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryZ --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ShakeGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryZ --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_cls_0409_;\npython train.py --backbone 
fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PickupGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryZ --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Wine --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryZ --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceFour --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryZ --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Meat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryZ --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Car --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryZ 
--transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning2 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryZ --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Herring --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryZ --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning7 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryZ --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Earthquakes --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryZ --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 
--dataset Haptics --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryZ --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Computers --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryZ --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryZ --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryZ --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryZ --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_cls_0409_;\npython train.py --backbone fcn --task 
classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryZ --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SyntheticControl --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryZ --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryZ --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryZ --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SonyAIBORobotSurface1 --mode finetune --epoch 1000 --classifier linear --loss 
cross_entropy --source_dataset UWaveGestureLibraryZ --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InlineSkate --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryZ --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGVerticalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryZ --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGHorizontalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryZ --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SmallKitchenAppliances --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryZ --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot 
/SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MoteStrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryZ --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset HandOutlines --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryZ --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset CinCECGTorso --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryZ --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Phoneme --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryZ --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InsectWingbeatSound --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryZ --transfer_strategy classification --cuda cuda:1 --save_csv_name 
UWaveGestureLibraryZ_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FacesUCR --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryZ --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceAll --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryZ --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Mallat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryZ --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryZ --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PhalangesOutlinesCorrect --mode finetune --epoch 1000 
--classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryZ --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryZ --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryZ --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryZ --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Yoga --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryZ --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear 
--classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MelbournePedestrian --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset UWaveGestureLibraryZ --transfer_strategy classification --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BirdChicken --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordB --transfer_strategy classification --cuda cuda:1 --save_csv_name FordB_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BeetleFly --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordB --transfer_strategy classification --cuda cuda:1 --save_csv_name FordB_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Coffee --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordB --transfer_strategy classification --cuda cuda:1 --save_csv_name FordB_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset OliveOil --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordB --transfer_strategy classification --cuda cuda:1 --save_csv_name FordB_finetune_cls_0409_;\npython train.py --backbone fcn --task classification 
--classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Beef --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordB --transfer_strategy classification --cuda cuda:1 --save_csv_name FordB_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Rock --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordB --transfer_strategy classification --cuda cuda:1 --save_csv_name FordB_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ShakeGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordB --transfer_strategy classification --cuda cuda:1 --save_csv_name FordB_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PickupGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordB --transfer_strategy classification --cuda cuda:1 --save_csv_name FordB_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Wine --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordB --transfer_strategy classification --cuda cuda:1 --save_csv_name FordB_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear 
--classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceFour --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordB --transfer_strategy classification --cuda cuda:1 --save_csv_name FordB_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Meat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordB --transfer_strategy classification --cuda cuda:1 --save_csv_name FordB_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Car --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordB --transfer_strategy classification --cuda cuda:1 --save_csv_name FordB_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning2 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordB --transfer_strategy classification --cuda cuda:1 --save_csv_name FordB_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Herring --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordB --transfer_strategy classification --cuda cuda:1 --save_csv_name FordB_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot 
/SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning7 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordB --transfer_strategy classification --cuda cuda:1 --save_csv_name FordB_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Earthquakes --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordB --transfer_strategy classification --cuda cuda:1 --save_csv_name FordB_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Haptics --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordB --transfer_strategy classification --cuda cuda:1 --save_csv_name FordB_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Computers --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordB --transfer_strategy classification --cuda cuda:1 --save_csv_name FordB_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordB --transfer_strategy classification --cuda cuda:1 --save_csv_name FordB_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot 
/SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordB --transfer_strategy classification --cuda cuda:1 --save_csv_name FordB_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordB --transfer_strategy classification --cuda cuda:1 --save_csv_name FordB_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordB --transfer_strategy classification --cuda cuda:1 --save_csv_name FordB_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SyntheticControl --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordB --transfer_strategy classification --cuda cuda:1 --save_csv_name FordB_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordB --transfer_strategy classification --cuda cuda:1 --save_csv_name FordB_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear 
--classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordB --transfer_strategy classification --cuda cuda:1 --save_csv_name FordB_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SonyAIBORobotSurface1 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordB --transfer_strategy classification --cuda cuda:1 --save_csv_name FordB_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InlineSkate --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordB --transfer_strategy classification --cuda cuda:1 --save_csv_name FordB_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGVerticalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordB --transfer_strategy classification --cuda cuda:1 --save_csv_name FordB_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGHorizontalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordB --transfer_strategy classification --cuda cuda:1 --save_csv_name FordB_finetune_cls_0409_;\npython train.py --backbone fcn --task 
classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SmallKitchenAppliances --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordB --transfer_strategy classification --cuda cuda:1 --save_csv_name FordB_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MoteStrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordB --transfer_strategy classification --cuda cuda:1 --save_csv_name FordB_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset HandOutlines --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordB --transfer_strategy classification --cuda cuda:1 --save_csv_name FordB_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset CinCECGTorso --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordB --transfer_strategy classification --cuda cuda:1 --save_csv_name FordB_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Phoneme --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordB --transfer_strategy classification --cuda cuda:1 --save_csv_name FordB_finetune_cls_0409_;\npython train.py --backbone fcn --task 
classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InsectWingbeatSound --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordB --transfer_strategy classification --cuda cuda:1 --save_csv_name FordB_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FacesUCR --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordB --transfer_strategy classification --cuda cuda:1 --save_csv_name FordB_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceAll --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordB --transfer_strategy classification --cuda cuda:1 --save_csv_name FordB_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Mallat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordB --transfer_strategy classification --cuda cuda:1 --save_csv_name FordB_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordB --transfer_strategy classification --cuda cuda:1 --save_csv_name FordB_finetune_cls_0409_;\npython train.py --backbone fcn --task 
classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PhalangesOutlinesCorrect --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordB --transfer_strategy classification --cuda cuda:1 --save_csv_name FordB_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordB --transfer_strategy classification --cuda cuda:1 --save_csv_name FordB_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordB --transfer_strategy classification --cuda cuda:1 --save_csv_name FordB_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordB --transfer_strategy classification --cuda cuda:1 --save_csv_name FordB_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Yoga --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordB --transfer_strategy classification --cuda cuda:1 --save_csv_name FordB_finetune_cls_0409_;\npython train.py 
--backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MelbournePedestrian --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset FordB --transfer_strategy classification --cuda cuda:1 --save_csv_name FordB_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BirdChicken --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ChlorineConcentration --transfer_strategy classification --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BeetleFly --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ChlorineConcentration --transfer_strategy classification --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Coffee --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ChlorineConcentration --transfer_strategy classification --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset OliveOil --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ChlorineConcentration 
--transfer_strategy classification --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Beef --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ChlorineConcentration --transfer_strategy classification --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Rock --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ChlorineConcentration --transfer_strategy classification --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ShakeGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ChlorineConcentration --transfer_strategy classification --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PickupGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ChlorineConcentration --transfer_strategy classification --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot 
/SSD/lz/UCRArchive_2018 --dataset Wine --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ChlorineConcentration --transfer_strategy classification --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceFour --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ChlorineConcentration --transfer_strategy classification --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Meat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ChlorineConcentration --transfer_strategy classification --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Car --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ChlorineConcentration --transfer_strategy classification --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning2 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ChlorineConcentration --transfer_strategy classification --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_cls_0409_;\npython train.py --backbone fcn --task 
classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Herring --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ChlorineConcentration --transfer_strategy classification --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning7 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ChlorineConcentration --transfer_strategy classification --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Earthquakes --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ChlorineConcentration --transfer_strategy classification --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Haptics --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ChlorineConcentration --transfer_strategy classification --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Computers --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ChlorineConcentration 
--transfer_strategy classification --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ChlorineConcentration --transfer_strategy classification --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ChlorineConcentration --transfer_strategy classification --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ChlorineConcentration --transfer_strategy classification --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ChlorineConcentration --transfer_strategy classification --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 
--normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SyntheticControl --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ChlorineConcentration --transfer_strategy classification --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ChlorineConcentration --transfer_strategy classification --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ChlorineConcentration --transfer_strategy classification --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SonyAIBORobotSurface1 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ChlorineConcentration --transfer_strategy classification --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InlineSkate --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ChlorineConcentration --transfer_strategy classification --cuda cuda:1 
--save_csv_name ChlorineConcentration_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGVerticalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ChlorineConcentration --transfer_strategy classification --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGHorizontalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ChlorineConcentration --transfer_strategy classification --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SmallKitchenAppliances --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ChlorineConcentration --transfer_strategy classification --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MoteStrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ChlorineConcentration --transfer_strategy classification --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset 
HandOutlines --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ChlorineConcentration --transfer_strategy classification --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset CinCECGTorso --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ChlorineConcentration --transfer_strategy classification --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Phoneme --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ChlorineConcentration --transfer_strategy classification --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InsectWingbeatSound --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ChlorineConcentration --transfer_strategy classification --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FacesUCR --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ChlorineConcentration --transfer_strategy classification --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_cls_0409_;\npython train.py --backbone fcn --task classification 
--classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceAll --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ChlorineConcentration --transfer_strategy classification --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Mallat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ChlorineConcentration --transfer_strategy classification --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ChlorineConcentration --transfer_strategy classification --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PhalangesOutlinesCorrect --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ChlorineConcentration --transfer_strategy classification --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ChlorineConcentration 
--transfer_strategy classification --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ChlorineConcentration --transfer_strategy classification --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ChlorineConcentration --transfer_strategy classification --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Yoga --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ChlorineConcentration --transfer_strategy classification --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MelbournePedestrian --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset ChlorineConcentration --transfer_strategy classification --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single 
--dataroot /SSD/lz/UCRArchive_2018 --dataset BirdChicken --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BeetleFly --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Coffee --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset OliveOil --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Beef --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy classification --cuda cuda:1 --save_csv_name 
NonInvasiveFetalECGThorax1_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Rock --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ShakeGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PickupGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Wine --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 
--dataset FaceFour --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Meat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Car --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning2 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Herring --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_cls_0409_;\npython train.py 
--backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning7 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Earthquakes --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Haptics --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Computers --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss 
cross_entropy --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SyntheticControl --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_cls_0409_;\npython train.py --backbone fcn 
--task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SonyAIBORobotSurface1 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InlineSkate --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGVerticalSignal --mode finetune --epoch 
1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGHorizontalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SmallKitchenAppliances --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MoteStrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset HandOutlines --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_cls_0409_;\npython train.py 
--backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset CinCECGTorso --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Phoneme --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InsectWingbeatSound --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FacesUCR --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceAll --mode finetune --epoch 1000 --classifier linear 
--loss cross_entropy --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Mallat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PhalangesOutlinesCorrect --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_cls_0409_;\npython train.py --backbone fcn --task 
classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Yoga --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MelbournePedestrian --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BirdChicken --mode finetune --epoch 1000 --classifier linear 
--loss cross_entropy --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BeetleFly --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Coffee --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset OliveOil --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Beef --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear 
--classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Rock --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ShakeGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PickupGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Wine --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceFour --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset 
NonInvasiveFetalECGThorax2 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Meat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Car --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning2 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Herring --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot 
/SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning7 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Earthquakes --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Haptics --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Computers --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy 
classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SyntheticControl --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot 
/SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SonyAIBORobotSurface1 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InlineSkate --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGVerticalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset 
NonInvasiveFetalECGThorax2 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGHorizontalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SmallKitchenAppliances --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MoteStrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset HandOutlines --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear 
--classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset CinCECGTorso --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Phoneme --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InsectWingbeatSound --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FacesUCR --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceAll --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset 
NonInvasiveFetalECGThorax2 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Mallat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PhalangesOutlinesCorrect --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear 
--classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Yoga --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MelbournePedestrian --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy classification --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_cls_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BirdChicken --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone 
fcn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BeetleFly --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Coffee --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset OliveOil --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Beef --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Rock --mode 
finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ShakeGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PickupGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Wine --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceFour --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 
--dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Meat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Car --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning2 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Herring --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning7 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_fcn_0409_;\npython 
train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Earthquakes --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Haptics --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Computers --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn 
--source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SyntheticControl --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot 
/SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SonyAIBORobotSurface1 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InlineSkate --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGVerticalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGHorizontalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_fcn_0409_;\npython train.py 
--backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SmallKitchenAppliances --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MoteStrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset HandOutlines --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset CinCECGTorso --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Phoneme --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset 
Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InsectWingbeatSound --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FacesUCR --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceAll --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Mallat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset 
MixedShapesSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PhalangesOutlinesCorrect --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task 
classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Yoga --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MelbournePedestrian --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BirdChicken --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BeetleFly --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Coffee --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn 
--source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset OliveOil --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Beef --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Rock --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ShakeGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 
--dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PickupGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Wine --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceFour --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Meat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Car --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ElectricDevices 
--transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning2 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Herring --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning7 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Earthquakes --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot 
/SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Haptics --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Computers --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn 
--source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SyntheticControl --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task 
classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SonyAIBORobotSurface1 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InlineSkate --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGVerticalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGHorizontalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SmallKitchenAppliances --mode finetune 
--epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MoteStrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset HandOutlines --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset CinCECGTorso --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Phoneme --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_fcn_0409_;\npython 
train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InsectWingbeatSound --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FacesUCR --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceAll --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Mallat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesSmallTrain --mode finetune 
--epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PhalangesOutlinesCorrect --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name 
ElectricDevices_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Yoga --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MelbournePedestrian --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BirdChicken --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BeetleFly --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot 
/SSD/lz/UCRArchive_2018 --dataset Coffee --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset OliveOil --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Beef --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Rock --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ShakeGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name 
StarLightCurves_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PickupGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Wine --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceFour --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Meat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 
--dataset Car --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning2 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Herring --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning7 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Earthquakes --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name 
StarLightCurves_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Haptics --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Computers --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot 
/SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SyntheticControl --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset StarLightCurves 
--transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SonyAIBORobotSurface1 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InlineSkate --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGVerticalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGHorizontalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 
--dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SmallKitchenAppliances --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MoteStrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset HandOutlines --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset CinCECGTorso --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Phoneme --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn 
--source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InsectWingbeatSound --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FacesUCR --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceAll --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Mallat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 
128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PhalangesOutlinesCorrect --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerRegularTrain --mode finetune --epoch 1000 --classifier linear --loss 
cross_entropy --decoder_backbone fcn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Yoga --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MelbournePedestrian --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BirdChicken --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BeetleFly --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 
128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Coffee --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset OliveOil --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Beef --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Rock --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ShakeGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name 
Wafer_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PickupGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Wine --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceFour --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Meat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Car --mode finetune --epoch 1000 --classifier linear --loss cross_entropy 
--decoder_backbone fcn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning2 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Herring --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning7 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Earthquakes --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot 
/SSD/lz/UCRArchive_2018 --dataset Haptics --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Computers --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_fcn_0409_;\npython train.py --backbone fcn 
--task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SyntheticControl --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SonyAIBORobotSurface1 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy 
--decoder_backbone fcn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InlineSkate --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGVerticalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGHorizontalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SmallKitchenAppliances --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 
--normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MoteStrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset HandOutlines --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset CinCECGTorso --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Phoneme --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InsectWingbeatSound --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_fcn_0409_;\npython 
train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FacesUCR --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceAll --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Mallat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PhalangesOutlinesCorrect --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn 
--source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Yoga --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot 
/SSD/lz/UCRArchive_2018 --dataset MelbournePedestrian --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BirdChicken --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BeetleFly --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Coffee --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset OliveOil --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_fcn_0409_;\npython train.py --backbone fcn 
--task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Beef --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Rock --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ShakeGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PickupGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Wine --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset 
ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceFour --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Meat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Car --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning2 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset 
Herring --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning7 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Earthquakes --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Haptics --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Computers --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear 
--classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SyntheticControl --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn 
--source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SonyAIBORobotSurface1 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InlineSkate --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 
--normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGVerticalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGHorizontalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SmallKitchenAppliances --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MoteStrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset HandOutlines --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name 
ECG5000_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset CinCECGTorso --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Phoneme --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InsectWingbeatSound --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FacesUCR --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceAll --mode finetune --epoch 1000 --classifier linear 
--loss cross_entropy --decoder_backbone fcn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Mallat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PhalangesOutlinesCorrect --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 
--dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Yoga --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MelbournePedestrian --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BirdChicken --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset TwoPatterns --transfer_strategy 
reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BeetleFly --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Coffee --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset OliveOil --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Beef --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 
--dataset Rock --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ShakeGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PickupGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Wine --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceFour --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_fcn_0409_;\npython 
train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Meat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Car --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning2 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Herring --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning7 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy 
--decoder_backbone fcn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Earthquakes --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Haptics --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Computers --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot 
/SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SyntheticControl --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn 
--source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SonyAIBORobotSurface1 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InlineSkate --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGVerticalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 
128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGHorizontalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SmallKitchenAppliances --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MoteStrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset HandOutlines --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset CinCECGTorso --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset 
TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Phoneme --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InsectWingbeatSound --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FacesUCR --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceAll --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way 
single --dataroot /SSD/lz/UCRArchive_2018 --dataset Mallat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PhalangesOutlinesCorrect --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset TwoPatterns --transfer_strategy reconstruction 
--cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Yoga --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MelbournePedestrian --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BirdChicken --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 
--dataset BeetleFly --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Coffee --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset OliveOil --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Beef --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Rock --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 
--dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ShakeGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PickupGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Wine --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceFour --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Meat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name 
FordA_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Car --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning2 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Herring --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning7 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Earthquakes --mode finetune --epoch 1000 --classifier linear --loss cross_entropy 
--decoder_backbone fcn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Haptics --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Computers --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way 
single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SyntheticControl --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name 
FordA_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SonyAIBORobotSurface1 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InlineSkate --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGVerticalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGHorizontalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SmallKitchenAppliances --mode finetune --epoch 1000 
--classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MoteStrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset HandOutlines --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset CinCECGTorso --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Phoneme --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot 
/SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InsectWingbeatSound --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FacesUCR --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceAll --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Mallat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name 
FordA_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PhalangesOutlinesCorrect --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Yoga --mode finetune --epoch 1000 
--classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MelbournePedestrian --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BirdChicken --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BeetleFly --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Coffee --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_fcn_0409_;\npython train.py 
--backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset OliveOil --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Beef --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Rock --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ShakeGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 
--dataset PickupGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Wine --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceFour --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Meat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Car --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryAll --transfer_strategy 
reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning2 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Herring --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning7 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Earthquakes --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear 
--classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Haptics --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Computers --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxTW --mode 
finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SyntheticControl --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset 
UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SonyAIBORobotSurface1 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InlineSkate --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGVerticalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGHorizontalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_fcn_0409_;\npython 
train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SmallKitchenAppliances --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MoteStrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset HandOutlines --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset CinCECGTorso --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot 
/SSD/lz/UCRArchive_2018 --dataset Phoneme --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InsectWingbeatSound --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FacesUCR --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceAll --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Mallat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryAll 
--transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PhalangesOutlinesCorrect --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_fcn_0409_;\npython train.py 
--backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Yoga --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MelbournePedestrian --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BirdChicken --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot 
/SSD/lz/UCRArchive_2018 --dataset BeetleFly --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Coffee --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset OliveOil --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Beef --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Rock --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction 
--cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ShakeGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PickupGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Wine --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceFour --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 
--dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Meat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Car --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning2 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Herring --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning7 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn 
--source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Earthquakes --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Haptics --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Computers --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task 
classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SyntheticControl --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot 
/SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SonyAIBORobotSurface1 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InlineSkate --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGVerticalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn 
--source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGHorizontalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SmallKitchenAppliances --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MoteStrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset HandOutlines --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_fcn_0409_;\npython train.py 
--backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset CinCECGTorso --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Phoneme --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InsectWingbeatSound --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FacesUCR --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset 
FaceAll --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Mallat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PhalangesOutlinesCorrect --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryX --transfer_strategy 
reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Yoga --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MelbournePedestrian --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear 
--classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BirdChicken --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BeetleFly --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Coffee --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset OliveOil --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Beef --mode finetune --epoch 1000 --classifier linear --loss 
cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Rock --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ShakeGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PickupGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Wine --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name 
UWaveGestureLibraryY_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceFour --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Meat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Car --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning2 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single 
--dataroot /SSD/lz/UCRArchive_2018 --dataset Herring --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning7 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Earthquakes --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Haptics --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Computers --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryY 
--transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_fcn_0409_;\npython train.py --backbone fcn 
--task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SyntheticControl --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SonyAIBORobotSurface1 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot 
/SSD/lz/UCRArchive_2018 --dataset InlineSkate --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGVerticalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGHorizontalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SmallKitchenAppliances --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MoteStrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset 
UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset HandOutlines --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset CinCECGTorso --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Phoneme --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InsectWingbeatSound --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task 
classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FacesUCR --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceAll --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Mallat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PhalangesOutlinesCorrect 
--mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Yoga --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction 
--cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MelbournePedestrian --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BirdChicken --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BeetleFly --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Coffee --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot 
/SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset OliveOil --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Beef --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Rock --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ShakeGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PickupGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy 
--decoder_backbone fcn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Wine --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceFour --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Meat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Car --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task 
classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning2 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Herring --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning7 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Earthquakes --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Haptics --mode finetune --epoch 
1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Computers --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 
--save_csv_name UWaveGestureLibraryZ_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SyntheticControl --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear 
--classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SonyAIBORobotSurface1 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InlineSkate --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGVerticalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGHorizontalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SmallKitchenAppliances --mode 
finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MoteStrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset HandOutlines --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset CinCECGTorso --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Phoneme --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name 
UWaveGestureLibraryZ_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InsectWingbeatSound --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FacesUCR --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceAll --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Mallat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 
--normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PhalangesOutlinesCorrect --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerRegularTrain --mode finetune --epoch 1000 --classifier linear --loss 
cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Yoga --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MelbournePedestrian --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BirdChicken --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BeetleFly --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification 
--classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Coffee --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset OliveOil --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Beef --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Rock --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ShakeGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordB --transfer_strategy reconstruction --cuda 
cuda:1 --save_csv_name FordB_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PickupGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Wine --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceFour --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Meat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Car --mode finetune --epoch 1000 --classifier linear --loss 
cross_entropy --decoder_backbone fcn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning2 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Herring --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning7 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Earthquakes --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single 
--dataroot /SSD/lz/UCRArchive_2018 --dataset Haptics --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Computers --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_fcn_0409_;\npython train.py 
--backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SyntheticControl --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SonyAIBORobotSurface1 --mode finetune --epoch 1000 --classifier linear --loss 
cross_entropy --decoder_backbone fcn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InlineSkate --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGVerticalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGHorizontalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SmallKitchenAppliances --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot 
/SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MoteStrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset HandOutlines --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset CinCECGTorso --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Phoneme --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InsectWingbeatSound --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name 
FordB_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FacesUCR --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceAll --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Mallat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PhalangesOutlinesCorrect --mode finetune --epoch 1000 --classifier linear 
--loss cross_entropy --decoder_backbone fcn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Yoga --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot 
/SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MelbournePedestrian --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BirdChicken --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BeetleFly --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Coffee --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset OliveOil --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset 
ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Beef --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Rock --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ShakeGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PickupGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task 
classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Wine --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceFour --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Meat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Car --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning2 --mode finetune --epoch 1000 
--classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Herring --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning7 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Earthquakes --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Haptics --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name 
ChlorineConcentration_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Computers --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot 
/SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SyntheticControl --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SonyAIBORobotSurface1 --mode finetune 
--epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InlineSkate --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGVerticalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGHorizontalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SmallKitchenAppliances --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ChlorineConcentration --transfer_strategy reconstruction 
--cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MoteStrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset HandOutlines --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset CinCECGTorso --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Phoneme --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 
--dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InsectWingbeatSound --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FacesUCR --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceAll --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Mallat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesSmallTrain --mode finetune --epoch 1000 --classifier linear --loss 
cross_entropy --decoder_backbone fcn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PhalangesOutlinesCorrect --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name 
ChlorineConcentration_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Yoga --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MelbournePedestrian --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BirdChicken --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BeetleFly --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot 
/SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Coffee --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset OliveOil --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Beef --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Rock --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ShakeGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear 
--loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PickupGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Wine --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceFour --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Meat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 
--save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Car --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning2 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Herring --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning7 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear 
--classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Earthquakes --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Haptics --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Computers --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset 
DistalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SyntheticControl --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy 
--decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SonyAIBORobotSurface1 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InlineSkate --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGVerticalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction 
--cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGHorizontalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SmallKitchenAppliances --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MoteStrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset HandOutlines --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task 
classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset CinCECGTorso --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Phoneme --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InsectWingbeatSound --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FacesUCR --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot 
/SSD/lz/UCRArchive_2018 --dataset FaceAll --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Mallat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PhalangesOutlinesCorrect --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy 
--decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Yoga --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MelbournePedestrian --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 
--save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BirdChicken --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BeetleFly --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Coffee --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset OliveOil --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear 
--classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Beef --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Rock --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ShakeGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PickupGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Wine 
--mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceFour --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Meat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Car --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning2 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy 
reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Herring --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning7 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Earthquakes --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Haptics --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task 
classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Computers --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single 
--dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SyntheticControl --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SonyAIBORobotSurface1 --mode finetune --epoch 
1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InlineSkate --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGVerticalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGHorizontalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SmallKitchenAppliances --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax2 
--transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MoteStrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset HandOutlines --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset CinCECGTorso --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Phoneme --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_fcn_0409_;\npython train.py 
--backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InsectWingbeatSound --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FacesUCR --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceAll --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Mallat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single 
--dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PhalangesOutlinesCorrect --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerRegularTrain --mode finetune --epoch 1000 
--classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Yoga --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MelbournePedestrian --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone fcn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_fcn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BirdChicken --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BeetleFly --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name 
Crop_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Coffee --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset OliveOil --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Beef --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Rock --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ShakeGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy 
--decoder_backbone rnn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PickupGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Wine --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceFour --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Meat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot 
/SSD/lz/UCRArchive_2018 --dataset Car --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning2 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Herring --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning7 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Earthquakes --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear 
--classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Haptics --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Computers --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Crop --transfer_strategy reconstruction --cuda 
cuda:1 --save_csv_name Crop_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SyntheticControl --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset 
SonyAIBORobotSurface1 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InlineSkate --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGVerticalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGHorizontalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SmallKitchenAppliances --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification 
--classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MoteStrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset HandOutlines --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset CinCECGTorso --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Phoneme --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InsectWingbeatSound --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Crop --transfer_strategy reconstruction 
--cuda cuda:1 --save_csv_name Crop_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FacesUCR --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceAll --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Mallat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PhalangesOutlinesCorrect --mode finetune --epoch 1000 
--classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Yoga --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot 
/SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MelbournePedestrian --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Crop --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Crop_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BirdChicken --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BeetleFly --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Coffee --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset OliveOil --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ElectricDevices --transfer_strategy 
reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Beef --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Rock --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ShakeGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PickupGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 
--normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Wine --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceFour --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Meat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Car --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning2 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda 
cuda:1 --save_csv_name ElectricDevices_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Herring --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning7 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Earthquakes --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Haptics --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot 
/SSD/lz/UCRArchive_2018 --dataset Computers --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ElectricDevices 
--transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SyntheticControl --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SonyAIBORobotSurface1 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear 
--classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InlineSkate --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGVerticalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGHorizontalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SmallKitchenAppliances --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MoteStrain --mode finetune --epoch 1000 --classifier linear --loss 
cross_entropy --decoder_backbone rnn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset HandOutlines --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset CinCECGTorso --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Phoneme --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InsectWingbeatSound --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task 
classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FacesUCR --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceAll --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Mallat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PhalangesOutlinesCorrect --mode finetune --epoch 1000 --classifier 
linear --loss cross_entropy --decoder_backbone rnn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Yoga --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_rnn_0409_;\npython train.py 
--backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MelbournePedestrian --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ElectricDevices --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ElectricDevices_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BirdChicken --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BeetleFly --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Coffee --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset OliveOil --mode finetune --epoch 1000 
--classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Beef --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Rock --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ShakeGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PickupGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_rnn_0409_;\npython train.py 
--backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Wine --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceFour --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Meat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Car --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning2 --mode finetune --epoch 1000 --classifier linear --loss 
cross_entropy --decoder_backbone rnn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Herring --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning7 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Earthquakes --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Haptics --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification 
--classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Computers --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxOutlineAgeGroup --mode finetune --epoch 1000 
--classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SyntheticControl --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SonyAIBORobotSurface1 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name 
StarLightCurves_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InlineSkate --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGVerticalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGHorizontalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SmallKitchenAppliances --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single 
--dataroot /SSD/lz/UCRArchive_2018 --dataset MoteStrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset HandOutlines --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset CinCECGTorso --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Phoneme --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InsectWingbeatSound --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset StarLightCurves --transfer_strategy reconstruction 
--cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FacesUCR --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceAll --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Mallat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single 
--dataroot /SSD/lz/UCRArchive_2018 --dataset PhalangesOutlinesCorrect --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Yoga --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset StarLightCurves 
--transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MelbournePedestrian --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset StarLightCurves --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name StarLightCurves_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BirdChicken --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BeetleFly --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Coffee --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot 
/SSD/lz/UCRArchive_2018 --dataset OliveOil --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Beef --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Rock --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ShakeGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PickupGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task 
classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Wine --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceFour --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Meat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Car --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning2 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Wafer --transfer_strategy reconstruction --cuda 
cuda:1 --save_csv_name Wafer_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Herring --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning7 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Earthquakes --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Haptics --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Computers --mode finetune --epoch 1000 --classifier linear 
--loss cross_entropy --decoder_backbone rnn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 
--dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SyntheticControl --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SonyAIBORobotSurface1 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InlineSkate --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Wafer --transfer_strategy reconstruction 
--cuda cuda:1 --save_csv_name Wafer_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGVerticalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGHorizontalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SmallKitchenAppliances --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MoteStrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset HandOutlines --mode 
finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset CinCECGTorso --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Phoneme --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InsectWingbeatSound --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FacesUCR --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 
--dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceAll --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Mallat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PhalangesOutlinesCorrect --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 
--save_csv_name Wafer_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Yoga --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MelbournePedestrian --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset Wafer --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name Wafer_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BirdChicken --mode finetune --epoch 1000 
--classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BeetleFly --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Coffee --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset OliveOil --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Beef --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot 
/SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Rock --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ShakeGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PickupGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Wine --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceFour --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name 
ECG5000_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Meat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Car --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning2 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Herring --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning7 --mode finetune --epoch 1000 --classifier linear --loss 
cross_entropy --decoder_backbone rnn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Earthquakes --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Haptics --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Computers --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 
--normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SyntheticControl --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda 
cuda:1 --save_csv_name ECG5000_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SonyAIBORobotSurface1 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InlineSkate --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGVerticalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset 
EOGHorizontalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SmallKitchenAppliances --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MoteStrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset HandOutlines --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset CinCECGTorso --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task 
classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Phoneme --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InsectWingbeatSound --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FacesUCR --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceAll --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Mallat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ECG5000 
--transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PhalangesOutlinesCorrect --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way 
single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Yoga --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MelbournePedestrian --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ECG5000 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ECG5000_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BirdChicken --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BeetleFly --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name 
TwoPatterns_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Coffee --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset OliveOil --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Beef --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Rock --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ShakeGestureWiimoteZ --mode finetune --epoch 
1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PickupGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Wine --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceFour --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Meat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier 
linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Car --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning2 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Herring --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning7 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Earthquakes --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset TwoPatterns 
--transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Haptics --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Computers --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 
--normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SyntheticControl --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset TwoPatterns 
--transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SonyAIBORobotSurface1 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InlineSkate --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGVerticalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGHorizontalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 
--normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SmallKitchenAppliances --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MoteStrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset HandOutlines --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset CinCECGTorso --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Phoneme --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 
--save_csv_name TwoPatterns_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InsectWingbeatSound --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FacesUCR --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceAll --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Mallat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset 
MixedShapesSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PhalangesOutlinesCorrect --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name 
TwoPatterns_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Yoga --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MelbournePedestrian --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset TwoPatterns --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name TwoPatterns_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BirdChicken --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BeetleFly --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Coffee --mode finetune --epoch 1000 --classifier 
linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset OliveOil --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Beef --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Rock --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ShakeGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 
--normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PickupGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Wine --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceFour --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Meat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Car --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_rnn_0409_;\npython train.py --backbone fcn 
--task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning2 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Herring --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning7 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Earthquakes --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Haptics --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordA --transfer_strategy 
reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Computers --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset 
MiddlePhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SyntheticControl --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SonyAIBORobotSurface1 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_rnn_0409_;\npython train.py --backbone 
fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InlineSkate --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGVerticalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGHorizontalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SmallKitchenAppliances --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MoteStrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn 
--source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset HandOutlines --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset CinCECGTorso --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Phoneme --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InsectWingbeatSound --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot 
/SSD/lz/UCRArchive_2018 --dataset FacesUCR --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceAll --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Mallat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PhalangesOutlinesCorrect --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task 
classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Yoga --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MelbournePedestrian --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn 
--source_dataset FordA --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordA_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BirdChicken --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BeetleFly --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Coffee --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset OliveOil --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification 
--classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Beef --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Rock --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ShakeGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PickupGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Wine --mode finetune 
--epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceFour --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Meat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Car --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning2 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name 
UWaveGestureLibraryAll_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Herring --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning7 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Earthquakes --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Haptics --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 
--normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Computers --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear 
--loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SyntheticControl --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SonyAIBORobotSurface1 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda 
cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InlineSkate --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGVerticalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGHorizontalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SmallKitchenAppliances --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear 
--classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MoteStrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset HandOutlines --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset CinCECGTorso --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Phoneme --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InsectWingbeatSound --mode finetune --epoch 1000 
--classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FacesUCR --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceAll --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Mallat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name 
UWaveGestureLibraryAll_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PhalangesOutlinesCorrect --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear 
--classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Yoga --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MelbournePedestrian --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryAll --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryAll_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BirdChicken --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BeetleFly --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Coffee --mode finetune --epoch 1000 --classifier 
linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset OliveOil --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Beef --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Rock --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ShakeGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name 
UWaveGestureLibraryX_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PickupGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Wine --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceFour --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Meat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way 
single --dataroot /SSD/lz/UCRArchive_2018 --dataset Car --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning2 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Herring --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning7 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Earthquakes --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryX 
--transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Haptics --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Computers --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification 
--classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SyntheticControl --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset 
ProximalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SonyAIBORobotSurface1 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InlineSkate --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGVerticalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGHorizontalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryX 
--transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SmallKitchenAppliances --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MoteStrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset HandOutlines --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset CinCECGTorso --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification 
--classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Phoneme --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InsectWingbeatSound --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FacesUCR --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceAll --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Mallat --mode finetune --epoch 1000 
--classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PhalangesOutlinesCorrect --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda 
cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Yoga --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MelbournePedestrian --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryX --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryX_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BirdChicken --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot 
/SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BeetleFly --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Coffee --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset OliveOil --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Beef --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Rock --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn 
--source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ShakeGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PickupGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Wine --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceFour --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_rnn_0409_;\npython train.py --backbone 
fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Meat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Car --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning2 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Herring --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning7 --mode finetune --epoch 
1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Earthquakes --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Haptics --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Computers --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name 
UWaveGestureLibraryY_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SyntheticControl --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 
--dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SonyAIBORobotSurface1 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InlineSkate --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGVerticalSignal --mode finetune --epoch 1000 
--classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGHorizontalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SmallKitchenAppliances --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MoteStrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset HandOutlines --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name 
UWaveGestureLibraryY_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset CinCECGTorso --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Phoneme --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InsectWingbeatSound --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FacesUCR --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 
--normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceAll --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Mallat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PhalangesOutlinesCorrect --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy 
--decoder_backbone rnn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Yoga --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryY_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MelbournePedestrian --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryY --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name 
UWaveGestureLibraryY_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BirdChicken --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BeetleFly --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Coffee --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset OliveOil --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way 
single --dataroot /SSD/lz/UCRArchive_2018 --dataset Beef --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Rock --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ShakeGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PickupGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Wine --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset 
UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceFour --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Meat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Car --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning2 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear 
--classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Herring --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning7 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Earthquakes --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Haptics --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Computers --mode finetune --epoch 1000 --classifier linear --loss 
cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name 
UWaveGestureLibraryZ_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SyntheticControl --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SonyAIBORobotSurface1 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 
--dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InlineSkate --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGVerticalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGHorizontalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SmallKitchenAppliances --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MoteStrain --mode finetune --epoch 1000 --classifier linear 
--loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset HandOutlines --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset CinCECGTorso --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Phoneme --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InsectWingbeatSound --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name 
UWaveGestureLibraryZ_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FacesUCR --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceAll --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Mallat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 
--normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PhalangesOutlinesCorrect --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Yoga --mode finetune --epoch 1000 --classifier linear --loss cross_entropy 
--decoder_backbone rnn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MelbournePedestrian --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset UWaveGestureLibraryZ --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name UWaveGestureLibraryZ_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BirdChicken --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BeetleFly --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Coffee --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 
--dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset OliveOil --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Beef --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Rock --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ShakeGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PickupGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name 
FordB_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Wine --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceFour --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Meat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Car --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning2 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone 
rnn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Herring --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning7 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Earthquakes --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Haptics --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 
--dataset Computers --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_rnn_0409_;\npython train.py --backbone fcn 
--task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SyntheticControl --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SonyAIBORobotSurface1 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InlineSkate --mode finetune --epoch 1000 --classifier linear --loss cross_entropy 
--decoder_backbone rnn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGVerticalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGHorizontalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SmallKitchenAppliances --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MoteStrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 
--normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset HandOutlines --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset CinCECGTorso --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Phoneme --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InsectWingbeatSound --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FacesUCR --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_rnn_0409_;\npython 
train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceAll --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Mallat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PhalangesOutlinesCorrect --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy 
--decoder_backbone rnn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Yoga --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MelbournePedestrian --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset FordB --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name FordB_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 
--normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BirdChicken --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BeetleFly --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Coffee --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset OliveOil --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Beef --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset 
ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Rock --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ShakeGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PickupGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Wine --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task 
classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceFour --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Meat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Car --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning2 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Herring --mode finetune --epoch 1000 
--classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning7 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Earthquakes --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Haptics --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Computers --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name 
ChlorineConcentration_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear 
--classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SyntheticControl --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SonyAIBORobotSurface1 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InlineSkate 
--mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGVerticalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGHorizontalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SmallKitchenAppliances --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MoteStrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ChlorineConcentration --transfer_strategy 
reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset HandOutlines --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset CinCECGTorso --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Phoneme --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InsectWingbeatSound --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear 
--classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FacesUCR --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceAll --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Mallat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PhalangesOutlinesCorrect --mode finetune --epoch 1000 
--classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Yoga --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 
--save_csv_name ChlorineConcentration_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MelbournePedestrian --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset ChlorineConcentration --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name ChlorineConcentration_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BirdChicken --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BeetleFly --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Coffee --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear 
--classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset OliveOil --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Beef --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Rock --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ShakeGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PickupGestureWiimoteZ 
--mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Wine --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceFour --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Meat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Car --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy 
reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning2 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Herring --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning7 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Earthquakes --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task 
classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Haptics --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Computers --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single 
--dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SyntheticControl --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 
--classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SonyAIBORobotSurface1 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InlineSkate --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGVerticalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGHorizontalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax1 
--transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SmallKitchenAppliances --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MoteStrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset HandOutlines --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset CinCECGTorso --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_rnn_0409_;\npython 
train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Phoneme --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InsectWingbeatSound --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FacesUCR --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceAll --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way 
single --dataroot /SSD/lz/UCRArchive_2018 --dataset Mallat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PhalangesOutlinesCorrect --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesRegularTrain --mode finetune --epoch 1000 
--classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Yoga --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MelbournePedestrian --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax1 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax1_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BirdChicken --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy 
reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset BeetleFly --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Coffee --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset OliveOil --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Beef --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification 
--classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Rock --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ShakeGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PickupGestureWiimoteZ --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Wine --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot 
/SSD/lz/UCRArchive_2018 --dataset FaceFour --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Meat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Car --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning2 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Herring --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset 
NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Lightning7 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Earthquakes --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Haptics --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Computers --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name 
NonInvasiveFetalECGThorax2_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset DistalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MiddlePhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task 
classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SyntheticControl --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxTW --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset ProximalPhalanxOutlineAgeGroup --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SonyAIBORobotSurface1 --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 
--normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InlineSkate --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGVerticalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset EOGHorizontalSignal --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset SmallKitchenAppliances --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MoteStrain --mode finetune --epoch 1000 
--classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset HandOutlines --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset CinCECGTorso --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Phoneme --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset InsectWingbeatSound --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy 
reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FacesUCR --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FaceAll --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Mallat --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task 
classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset PhalangesOutlinesCorrect --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerSmallTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MixedShapesRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset FreezerRegularTrain --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 
--normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset Yoga --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_rnn_0409_;\npython train.py --backbone fcn --task classification --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroot /SSD/lz/UCRArchive_2018 --dataset MelbournePedestrian --mode finetune --epoch 1000 --classifier linear --loss cross_entropy --decoder_backbone rnn --source_dataset NonInvasiveFetalECGThorax2 --transfer_strategy reconstruction --cuda cuda:1 --save_csv_name NonInvasiveFetalECGThorax2_finetune_rec_rnn_0409_;\n"
  },
  {
    "path": "ts_classification_methods/selftime_cls/__init__.py",
    "content": ""
  },
  {
    "path": "ts_classification_methods/selftime_cls/config/CricketX_config.json",
    "content": "{\n    \"piece_size\": 0.2,\n    \"class_type\": \"3C\"\n}"
  },
  {
    "path": "ts_classification_methods/selftime_cls/config/DodgerLoopDay_config.json",
    "content": "{\n    \"piece_size\": 0.35,\n    \"class_type\": \"5C\"\n}"
  },
  {
    "path": "ts_classification_methods/selftime_cls/config/InsectWingbeatSound_config.json",
    "content": "{\n    \"piece_size\": 0.4,\n    \"class_type\": \"6C\"\n}"
  },
  {
    "path": "ts_classification_methods/selftime_cls/config/MFPT_config.json",
    "content": "{\n    \"piece_size\": 0.2,\n    \"class_type\": \"4C\"\n}"
  },
  {
    "path": "ts_classification_methods/selftime_cls/config/UWaveGestureLibraryAll_config.json",
    "content": "{\n    \"piece_size\": 0.2,\n    \"class_type\": \"4C\"\n}"
  },
  {
    "path": "ts_classification_methods/selftime_cls/config/XJTU_config.json",
    "content": "{\n    \"piece_size\": 0.2,\n    \"class_type\": \"4C\"\n}"
  },
  {
    "path": "ts_classification_methods/selftime_cls/dataloader/TSC_data_loader.py",
    "content": "\nfrom sklearn import preprocessing\nimport numpy as np\n\n\ndef set_nan_to_zero(a):\n    where_are_NaNs = np.isnan(a)\n    a[where_are_NaNs] = 0\n    return a\n\n\ndef TSC_data_loader(dataset_path,dataset_name):\n    print(\"[INFO] {}\".format(dataset_name))\n\n    Train_dataset = np.loadtxt(\n        dataset_path + '/' + dataset_name + '/' + dataset_name + '_TRAIN.tsv')\n    Test_dataset = np.loadtxt(\n        dataset_path + '/' + dataset_name + '/' + dataset_name + '_TEST.tsv')\n    Train_dataset = Train_dataset.astype(np.float32)\n    Test_dataset = Test_dataset.astype(np.float32)\n\n    X_train = Train_dataset[:, 1:]\n    y_train = Train_dataset[:, 0:1]\n\n    X_test = Test_dataset[:, 1:]\n    y_test = Test_dataset[:, 0:1]\n    le = preprocessing.LabelEncoder()\n    le.fit(np.squeeze(y_train, axis=1))\n    y_train = le.transform(np.squeeze(y_train, axis=1))\n    y_test = le.transform(np.squeeze(y_test, axis=1))\n    return set_nan_to_zero(X_train), y_train, set_nan_to_zero(X_test), y_test\n\n"
  },
  {
    "path": "ts_classification_methods/selftime_cls/dataloader/__init__.py",
    "content": "# -*- coding: utf-8 -*-\n"
  },
  {
    "path": "ts_classification_methods/selftime_cls/dataloader/ucr2018.py",
    "content": "# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport torch.utils.data as data\n'''\nfrom TSC_data_loader import TSC_data_loader\nfrom dataprepare import *\nimport sys\nsys.path.append('..')\n'''\nimport utils.datasets as ds\nimport torch\nfrom dataloader.TSC_data_loader import TSC_data_loader\n\n\n\nclass UCR2018(data.Dataset):\n\n    def __init__(self, data, targets, transform):\n        self.data = np.asarray(data, dtype=np.float32)\n        self.targets = np.asarray(targets, dtype=np.int64)\n        self.transform = transform\n\n    def __getitem__(self, index):\n        img, target = self.data[index], self.targets[index]\n        if self.transform is not None:\n            img_transformed = self.transform(img.copy())\n        else:\n            img_transformed = img\n\n        return img_transformed, target\n\n    def __len__(self):\n        return self.data.shape[0]\n\n\nclass MultiUCR2018_Intra(data.Dataset):\n\n    def __init__(self, data, targets, K, transform, transform_cut, totensor_transform):\n        self.data = np.asarray(data, dtype=np.float32)\n        self.targets = np.asarray(targets, dtype=np.int16)\n        self.K = K  # tot number of augmentations\n        self.transform = transform\n        self.transform_cut = transform_cut\n        self.totensor_transform = totensor_transform\n\n    def __getitem__(self, index):\n        # print(\"### {}\".format(index))\n        img, target = self.data[index], self.targets[index]\n        img_list0 = list()\n        img_list1 = list()\n        label_list = list()\n\n        if self.transform is not None:\n            for _ in range(self.K):\n                img_transformed = self.transform(img.copy())\n                img_cut0, img_cut1, label = self.transform_cut(img_transformed)\n                img_list0.append(self.totensor_transform(img_cut0))\n                img_list1.append(self.totensor_transform(img_cut1))\n                label = torch.from_numpy(np.array(label)).cuda()\n         
       label_list.append(label)\n\n        return img_list0, img_list1, label_list, target\n\n    def __len__(self):\n        return self.data.shape[0]\n\n\nclass MultiUCR2018_InterIntra(data.Dataset):\n\n    def __init__(self, data, targets, K, transform, transform_cut, totensor_transform):\n        self.data = np.asarray(data, dtype=np.float32)\n        self.targets = np.asarray(targets, dtype=np.int16)\n        self.K = K  # tot number of augmentations\n        self.transform = transform\n        self.transform_cut = transform_cut\n        self.totensor_transform = totensor_transform\n\n    def __getitem__(self, index):\n        # print(\"### {}\".format(index))\n        img, target = self.data[index], self.targets[index]\n        img_list = list()\n        img_list0 = list()\n        img_list1 = list()\n        label_list = list()\n\n        if self.transform is not None:\n            for _ in range(self.K):\n                img_transformed = self.transform(img.copy())\n                img_cut0, img_cut1, label = self.transform_cut(img_transformed)\n                img_list.append(self.totensor_transform(img_transformed))\n                img_list0.append(self.totensor_transform(img_cut0))\n                img_list1.append(self.totensor_transform(img_cut1))\n                label = torch.from_numpy(np.array(label)).cuda()\n                label_list.append(label)\n            #label_list = torch.from_numpy(np.array(label_list)).cuda()\n        return img_list, img_list0, img_list1, label_list, target\n\n    def __len__(self):\n        return self.data.shape[0]\n\n\nclass MultiUCR2018(data.Dataset):\n\n    def __init__(self, data, targets, K, transform):\n        self.data = np.asarray(data, dtype=np.float32)\n        self.targets = np.asarray(targets, dtype=np.int16)\n        self.K = K  # tot number of augmentations\n        self.transform = transform\n\n    def __getitem__(self, index):\n        # print(\"### {}\".format(index))\n        img, target = 
self.data[index], self.targets[index]\n        img_list = list()\n        if self.transform is not None:\n            for _ in range(self.K):\n                img_transformed = self.transform(img.copy())\n                img_list.append(img_transformed)\n        else:\n            img_list = img\n\n        return img_list, target\n\n    def __len__(self):\n        return self.data.shape[0]\n\n\ndef load_ucr2018(dataset_path, dataset_name):\n    ##################\n    # load raw data\n    ##################\n    nb_class = ds.nb_classes(dataset_name)\n    nb_dims = ds.nb_dims(dataset_name)\n\n    if dataset_name in ['MFPT', 'XJTU']:\n        x = np.load(\"{}/{}/{}_data.npy\".format(dataset_path, dataset_name, dataset_name))\n        y = np.load(\"{}/{}/{}_label.npy\".format(dataset_path, dataset_name, dataset_name))\n\n        (x_train, x_test)=(x[:100], x[100:])\n        (y_train, y_test)=(y[:100], y[100:])\n\n    else:\n        x_train, y_train, x_test, y_test = TSC_data_loader(dataset_path, dataset_name)\n\n    nb_timesteps = int(x_train.shape[1] / nb_dims)\n    input_shape = (nb_timesteps, nb_dims)\n\n    ############################################\n    # Combine all train and test data for resample\n    ############################################\n\n    x_all = np.concatenate((x_train, x_test), axis=0)\n    y_all = np.concatenate((y_train, y_test), axis=0)\n    ts_idx = list(range(x_all.shape[0]))\n    np.random.shuffle(ts_idx)\n    x_all = x_all[ts_idx]\n    y_all = y_all[ts_idx]\n\n    label_idxs = np.unique(y_all)\n    class_stat_all = {}\n    for idx in label_idxs:\n        class_stat_all[idx] = len(np.where(y_all == idx)[0])\n    print(\"[Stat] All class: {}\".format(class_stat_all))\n\n    test_idx = []\n    val_idx = []\n    train_idx = []\n    for idx in label_idxs:\n        target = list(np.where(y_all == idx)[0])\n        nb_samp = int(len(target))\n        test_idx += target[:int(nb_samp * 0.25)]\n        val_idx += target[int(nb_samp * 
0.25):int(nb_samp * 0.5)]\n        train_idx += target[int(nb_samp * 0.5):]\n\n    x_test = x_all[test_idx]\n    y_test = y_all[test_idx]\n    x_val = x_all[val_idx]\n    y_val = y_all[val_idx]\n    x_train = x_all[train_idx]\n    y_train = y_all[train_idx]\n\n    label_idxs = np.unique(y_train)\n    class_stat = {}\n    for idx in label_idxs:\n        class_stat[idx] = len(np.where(y_train == idx)[0])\n    # print(\"[Stat] Train class: {}\".format(class_stat))\n    print(\"[Stat] Train class: mean={}, std={}\".format(np.mean(list(class_stat.values())),\n                                                       np.std(list(class_stat.values()))))\n\n    label_idxs = np.unique(y_val)\n    class_stat = {}\n    for idx in label_idxs:\n        class_stat[idx] = len(np.where(y_val == idx)[0])\n    # print(\"[Stat] Test class: {}\".format(class_stat))\n    print(\"[Stat] Val class: mean={}, std={}\".format(np.mean(list(class_stat.values())),\n                                                     np.std(list(class_stat.values()))))\n\n    label_idxs = np.unique(y_test)\n    class_stat = {}\n    for idx in label_idxs:\n        class_stat[idx] = len(np.where(y_test == idx)[0])\n    # print(\"[Stat] Test class: {}\".format(class_stat))\n    print(\"[Stat] Test class: mean={}, std={}\".format(np.mean(list(class_stat.values())),\n                                                      np.std(list(class_stat.values()))))\n\n    ########################################\n    # Data Split End\n    ########################################\n\n    # Process data\n    x_test = x_test.reshape((-1, input_shape[0], input_shape[1]))\n    x_val = x_val.reshape((-1, input_shape[0], input_shape[1]))\n    x_train = x_train.reshape((-1, input_shape[0], input_shape[1]))\n\n    print(\"Train:{}, Test:{}, Class:{}\".format(x_train.shape, x_test.shape, nb_class))\n\n    # Normalize\n    x_train_max = np.max(x_train)\n    x_train_min = np.min(x_train)\n    x_train = 2. 
* (x_train - x_train_min) / (x_train_max - x_train_min) - 1.\n    # Test is secret\n    x_val = 2. * (x_val - x_train_min) / (x_train_max - x_train_min) - 1.\n    x_test = 2. * (x_test - x_train_min) / (x_train_max - x_train_min) - 1.\n\n    return x_train, y_train, x_val, y_val, x_test, y_test, nb_class, class_stat_all\n\n\nif __name__ == '__main__':\n    x_train, y_train, x_val, y_val, x_test, y_test, nb_class, class_stat_all = load_ucr2018('/dev_data/zzj/hzy/datasets/UCR', 'Crop')\n    \n    print(y_train[0].shape)\n\n    x_train, y_train, x_val = load_data('/dev_data/zzj/hzy/datasets/UCR', 'Crop')\n    print(y_train[0].shape)"
  },
  {
    "path": "ts_classification_methods/selftime_cls/dataprepare.py",
    "content": "import pandas as pd\nfrom sklearn.model_selection import StratifiedShuffleSplit, StratifiedKFold\nfrom sklearn import preprocessing\nimport numpy as np\nimport os\n\ndef load_data(dataroot, dataset):\n    train = pd.read_csv(os.path.join(dataroot, dataset, dataset+'_TRAIN.tsv'), sep='\\t', header=None)\n    train_x = train.iloc[:, 1:]\n    train_target = train.iloc[:, 0]\n\n    test = pd.read_csv(os.path.join(dataroot, dataset, dataset+'_TEST.tsv'), sep='\\t', header=None)\n    test_x = test.iloc[:, 1:]\n    test_target = test.iloc[:, 0]\n\n    \n    sum_dataset = pd.concat([train_x, test_x]).to_numpy(np.float32)\n    #sum_dataset = sum_dataset.fillna(sum_dataset.mean()).to_numpy(dtype=np.float32)\n    sum_target = pd.concat([train_target, test_target]).to_numpy(np.float32)\n    # sum_target = sum_target.fillna(sum_target.mean()).to_numpy(dtype=np.float32)\n    \n    \n    num_classes = len(np.unique(sum_target))\n    sum_target = transfer_labels(sum_target)\n\n    sum_dataset = np.expand_dims(sum_dataset, 2)\n    return sum_dataset, sum_target, num_classes\n\ndef transfer_labels(labels):\n    indicies = np.unique(labels)\n    num_samples = labels.shape[0]\n\n    for i in range(num_samples):\n        new_label = np.argwhere(labels[i] == indicies)[0][0]\n        labels[i] = new_label\n    \n    return labels\n\ndef k_fold(data, target):\n    skf = StratifiedKFold(5, shuffle=True, random_state=42)\n    #skf = StratifiedShuffleSplit(5)\n    train_sets = []\n    train_targets = []\n\n    val_sets = []\n    val_targets = []\n\n    test_sets = []\n    test_targets = []\n\n    for raw_index, test_index in skf.split(data, target):\n        raw_set = data[raw_index]\n        raw_target = target[raw_index]\n\n        test_sets.append(data[test_index])\n        test_targets.append(target[test_index])\n\n        train_index, val_index = next(StratifiedKFold(4, shuffle=True, random_state=42).split(raw_set, raw_target))\n        # train_index, val_index = 
next(StratifiedShuffleSplit(1).split(raw_set, raw_target))\n        train_sets.append(raw_set[train_index])\n        train_targets.append(raw_target[train_index])\n\n        val_sets.append(raw_set[val_index])\n        val_targets.append(raw_target[val_index])\n\n    return np.array(train_sets), np.array(train_targets), np.array(val_sets), np.array(val_targets), np.array(test_sets), np.array(test_targets)\n\n\ndef normalize_per_series(data):\n    std_ = data.std(axis=1, keepdims=True)\n    return (data - data.mean(axis=1, keepdims=True)) / std_\n\n\ndef fill_nan_value(train_set, val_set, test_set):\n\n    ind = np.where(np.isnan(train_set))\n    col_mean = np.nanmean(train_set, axis=0)\n    col_mean[np.isnan(col_mean)] = 1e-6\n\n    train_set[ind] = np.take(col_mean, ind[1])\n    \n    ind_val = np.where(np.isnan(val_set))\n    val_set[ind_val] = np.take(col_mean, ind_val[1])\n\n    ind_test = np.where(np.isnan(test_set))\n    test_set[ind_test] = np.take(col_mean, ind_test[1])\n    return train_set, val_set, test_set\n\n\n"
  },
  {
    "path": "ts_classification_methods/selftime_cls/evaluation/__init__.py",
    "content": "# -*- coding: utf-8 -*-\n"
  },
  {
    "path": "ts_classification_methods/selftime_cls/evaluation/eval_ssl.py",
    "content": "# -*- coding: utf-8 -*-\n\nimport torch\nimport utils.transforms as transforms\nfrom dataloader.ucr2018 import UCR2018\nimport torch.utils.data as data\nfrom optim.pytorchtools import EarlyStopping\nfrom model.model_backbone import SimConv4\n\n\ndef evaluation(x_train, y_train, x_val, y_val, x_test, y_test, nb_class, ckpt, opt, in_channel=1, ckpt_tosave=None, my_state=None):\n    # no augmentations used for linear evaluation\n    transform_lineval = transforms.Compose([transforms.ToTensor()])\n\n    train_set_lineval = UCR2018(data=x_train, targets=y_train, transform=transform_lineval)\n    val_set_lineval = UCR2018(data=x_val, targets=y_val, transform=transform_lineval)\n    test_set_lineval = UCR2018(data=x_test, targets=y_test, transform=transform_lineval)\n\n    train_loader_lineval = torch.utils.data.DataLoader(train_set_lineval, batch_size=128, shuffle=True)\n    val_loader_lineval = torch.utils.data.DataLoader(val_set_lineval, batch_size=128, shuffle=False)\n    test_loader_lineval = torch.utils.data.DataLoader(test_set_lineval, batch_size=128, shuffle=False)\n    signal_length = x_train.shape[1]\n\n    # loading the saved backbone\n    backbone_lineval = SimConv4(in_channel).cuda()  # defining a raw backbone model\n    # backbone_lineval = OS_CNN(signal_length).cuda()  # defining a raw backbone model\n\n    # 64 are the number of output features in the backbone, and 10 the number of classes\n    linear_layer = torch.nn.Linear(opt.feature_size, nb_class).cuda()\n    # linear_layer = torch.nn.Linear(backbone_lineval.rep_dim, nb_class).cuda()\n\n    if ckpt != None:\n        checkpoint = torch.load(ckpt, map_location='cpu')\n    else:\n        checkpoint = my_state\n    backbone_lineval.load_state_dict(checkpoint)\n    if ckpt_tosave:\n        torch.save(backbone_lineval.state_dict(), ckpt_tosave)\n\n    optimizer = torch.optim.Adam(linear_layer.parameters(), lr=0.5)#lr=opt.learning_rate_test)\n    CE = torch.nn.CrossEntropyLoss()\n\n    
early_stopping = EarlyStopping(100, verbose=True) # opt.patience\n    best_acc = 0\n    best_epoch = 0\n\n    print('Linear evaluation')\n    for epoch in range(400): # opt.epoch_test\n        linear_layer.train()\n        backbone_lineval.eval()\n\n        acc_trains = list()\n        for i, (data, target) in enumerate(train_loader_lineval):\n            optimizer.zero_grad()\n            data = data.cuda()\n            target = target.cuda()\n\n            output = backbone_lineval(data).detach()\n            output = linear_layer(output)\n            loss = CE(output, target)\n            loss.backward()\n            optimizer.step()\n            # estimate the accuracy\n            prediction = output.argmax(-1)\n            correct = prediction.eq(target.view_as(prediction)).sum()\n            accuracy = (100.0 * correct / len(target))\n            acc_trains.append(accuracy.item())\n\n        print('[Train-{}][{}] loss: {:.5f}; \\t Acc: {:.2f}%' \\\n              .format(epoch + 1, opt.model_name, loss.item(), sum(acc_trains) / len(acc_trains)))\n\n        acc_vals = list()\n        acc_tests = list()\n        linear_layer.eval()\n        with torch.no_grad():\n            for i, (data, target) in enumerate(val_loader_lineval):\n                data = data.cuda()\n                target = target.cuda()\n\n                output = backbone_lineval(data).detach()\n                output = linear_layer(output)\n                # estimate the accuracy\n                prediction = output.argmax(-1)\n                correct = prediction.eq(target.view_as(prediction)).sum()\n                accuracy = (100.0 * correct / len(target))\n                acc_vals.append(accuracy.item())\n\n            val_acc = sum(acc_vals) / len(acc_vals)\n            if val_acc > best_acc:\n                best_acc = val_acc\n                best_epoch = epoch\n                for i, (data, target) in enumerate(test_loader_lineval):\n                    data = data.cuda()\n           
         target = target.cuda()\n\n                    output = backbone_lineval(data).detach()\n                    output = linear_layer(output)\n                    # estimate the accuracy\n                    prediction = output.argmax(-1)\n                    correct = prediction.eq(target.view_as(prediction)).sum()\n                    accuracy = (100.0 * correct / len(target))\n                    acc_tests.append(accuracy.item())\n\n                test_acc = sum(acc_tests) / len(acc_tests)\n\n        print('[Test-{}] Val ACC:{:.2f}%, Best Test ACC.: {:.2f}% in Epoch {}'.format(\n            epoch, val_acc, test_acc, best_epoch))\n        early_stopping(val_acc, None)\n        if early_stopping.early_stop:\n            print(\"Early stopping\")\n            break\n\n    return test_acc, best_epoch\n\n\n\n\n"
  },
  {
    "path": "ts_classification_methods/selftime_cls/model/__init__.py",
    "content": "# -*- coding: utf-8 -*-\n"
  },
  {
    "path": "ts_classification_methods/selftime_cls/model/model_RelationalReasoning.py",
    "content": "# -*- coding: utf-8 -*-\n\nimport torch\nfrom optim.pytorchtools import EarlyStopping\nimport torch.nn as nn\n\n\nclass RelationalReasoning(torch.nn.Module):\n\n  def __init__(self, backbone, feature_size=64):\n    super(RelationalReasoning, self).__init__()\n    self.backbone = backbone\n    self.relation_head = torch.nn.Sequential(\n                             torch.nn.Linear(feature_size*2, 256),\n                             torch.nn.BatchNorm1d(256),\n                             torch.nn.LeakyReLU(),\n                             torch.nn.Linear(256, 1))\n\n  def aggregate(self, features, K):\n    relation_pairs_list = list()\n    targets_list = list()\n    size = int(features.shape[0] / K)\n    shifts_counter=1\n    for index_1 in range(0, size*K, size):\n      for index_2 in range(index_1+size, size*K, size):\n        # Using the 'cat' aggregation function by default\n        pos1 = features[index_1:index_1 + size]\n        pos2 = features[index_2:index_2+size]\n        pos_pair = torch.cat([pos1,\n                              pos2], 1)  # (batch_size, fz*2)\n\n        # Shuffle without collisions by rolling the mini-batch (negatives)\n        neg1 = torch.roll(features[index_2:index_2 + size],\n                          shifts=shifts_counter, dims=0)\n        neg_pair1 = torch.cat([pos1, neg1], 1) # (batch_size, fz*2)\n\n        relation_pairs_list.append(pos_pair)\n        relation_pairs_list.append(neg_pair1)\n\n        targets_list.append(torch.ones(size, dtype=torch.float32).cuda())\n        targets_list.append(torch.zeros(size, dtype=torch.float32).cuda())\n\n        shifts_counter+=1\n        if(shifts_counter>=size):\n            shifts_counter=1 # avoid identity pairs\n    relation_pairs = torch.cat(relation_pairs_list, 0).cuda()  # K(K-1) * (batch_size, fz*2)\n    targets = torch.cat(targets_list, 0).cuda()\n    return relation_pairs, targets\n\n  def train(self, tot_epochs, train_loader, opt):\n    patience = opt.patience\n    
early_stopping = EarlyStopping(patience, verbose=True,\n                                   checkpoint_pth='{}/backbone_best.tar'.format(opt.ckpt_dir))\n\n    optimizer = torch.optim.Adam([\n                  {'params': self.backbone.parameters()},\n                  {'params': self.relation_head.parameters()}], lr=opt.learning_rate)\n    BCE = torch.nn.BCEWithLogitsLoss()\n    self.backbone.train()\n    self.relation_head.train()\n    epoch_max = 0\n    acc_max=0\n    for epoch in range(tot_epochs):\n\n      acc_epoch=0\n      loss_epoch=0\n      # the real target is discarded (unsupervised)\n      for i, (data_augmented, _) in enumerate(train_loader):\n        K = len(data_augmented) # tot augmentations\n        x = torch.cat(data_augmented, 0).cuda()\n\n        optimizer.zero_grad()\n        # forward pass (backbone)\n        features = self.backbone(x)\n        # aggregation function\n        relation_pairs, targets = self.aggregate(features, K)\n\n        # forward pass (relation head)\n        score = self.relation_head(relation_pairs).squeeze()\n        # cross-entropy loss and backward\n        loss = BCE(score, targets)\n        loss.backward()\n        optimizer.step()\n        # estimate the accuracy\n        predicted = torch.round(torch.sigmoid(score))\n        correct = predicted.eq(targets.view_as(predicted)).sum()\n        accuracy = (100.0 * correct / float(len(targets)))\n        acc_epoch += accuracy.item()\n        loss_epoch += loss.item()\n\n      acc_epoch /= len(train_loader)\n      loss_epoch /= len(train_loader)\n\n      if acc_epoch>acc_max:\n          acc_max = acc_epoch\n          epoch_max = epoch\n\n      early_stopping(acc_epoch, self.backbone)\n      if early_stopping.early_stop:\n          print(\"Early stopping\")\n          break\n\n      if (epoch+1)%opt.save_freq==0:\n        print(\"[INFO] save backbone at epoch {}!\".format(epoch))\n        torch.save(self.backbone.state_dict(), '{}/backbone_{}.tar'.format(opt.ckpt_dir, 
epoch))\n\n      print('Epoch [{}][{}][{}] loss= {:.5f}; Epoch ACC.= {:.2f}%, Max ACC.= {:.1f}%, Max Epoch={}' \\\n            .format(epoch + 1, opt.model_name, opt.dataset_name,\n                    loss_epoch, acc_epoch, acc_max, epoch_max))\n    return acc_max, epoch_max\n\n\nclass RelationalReasoning_Intra(torch.nn.Module):\n\n  def __init__(self, backbone, feature_size=64, nb_class=3):\n    super(RelationalReasoning_Intra, self).__init__()\n    self.backbone = backbone\n\n    self.cls_head = torch.nn.Sequential(\n        torch.nn.Linear(feature_size*2, 256),\n        torch.nn.BatchNorm1d(256),\n        torch.nn.LeakyReLU(),\n        torch.nn.Linear(256, nb_class),\n        torch.nn.Softmax(),\n    )\n\n  def run_test(self, predict, labels):\n      correct = 0\n      pred = predict.data.max(1)[1]\n      correct = pred.eq(labels.data).cpu().sum()\n      return correct, len(labels.data)\n\n  def train(self, tot_epochs, train_loader, opt):\n    patience = opt.patience\n    early_stopping = EarlyStopping(patience, verbose=True,\n                                   checkpoint_pth='{}/backbone_best.tar'.format(opt.ckpt_dir))\n\n    optimizer = torch.optim.Adam([\n                  {'params': self.backbone.parameters()},\n        {'params': self.cls_head.parameters()},\n    ], lr=opt.learning_rate)\n    c_criterion = nn.CrossEntropyLoss()\n\n    self.backbone.train()\n    self.cls_head.train()\n    epoch_max = 0\n    acc_max=0\n    for epoch in range(tot_epochs):\n\n      acc_epoch=0\n      acc_epoch_cls=0\n      loss_epoch=0\n      # the real target is discarded (unsupervised)\n      for i, (data_augmented0, data_augmented1, data_label, _) in enumerate(train_loader):\n        K = len(data_augmented0) # tot augmentations\n        x_cut0 = torch.cat(data_augmented0, 0).cuda()\n        x_cut1 = torch.cat(data_augmented1, 0).cuda()\n        c_label = torch.cat(data_label, 0).cuda()\n\n        optimizer.zero_grad()\n        # forward pass (backbone)\n        features_cut0 
= self.backbone(x_cut0)\n        features_cut1 = self.backbone(x_cut1)\n        features_cls = torch.cat([features_cut0, features_cut1], 1)\n\n        # score_intra = self.relation_head(relation_pairs_intra).squeeze()\n        c_output = self.cls_head(features_cls)\n        correct_cls, length_cls = self.run_test(c_output, c_label)\n\n        loss_c = c_criterion(c_output, c_label)\n        loss=loss_c\n\n        loss.backward()\n        optimizer.step()\n        # estimate the accuracy\n        loss_epoch += loss.item()\n\n        accuracy_cls = 100. * correct_cls / length_cls\n        acc_epoch_cls += accuracy_cls.item()\n\n      acc_epoch_cls /= len(train_loader)\n      loss_epoch /= len(train_loader)\n\n      if acc_epoch_cls>acc_max:\n          acc_max = acc_epoch_cls\n          epoch_max = epoch\n\n      early_stopping(acc_epoch_cls, self.backbone)\n      if early_stopping.early_stop:\n          print(\"Early stopping\")\n          break\n\n      if (epoch+1)%opt.save_freq==0:\n        print(\"[INFO] save backbone at epoch {}!\".format(epoch))\n        torch.save(self.backbone.state_dict(), '{}/backbone_{}.tar'.format(opt.ckpt_dir, epoch))\n\n      print('Epoch [{}][{}][{}] loss= {:.5f}; Epoch ACC.= {:.2f}%, CLS.= {:.2f}%, '\n            'Max ACC.= {:.1f}%, Max Epoch={}' \\\n            .format(epoch + 1, opt.model_name, opt.dataset_name,\n                    loss_epoch, acc_epoch,acc_epoch_cls, acc_max, epoch_max))\n    return acc_max, epoch_max\n\n\nclass RelationalReasoning_InterIntra(torch.nn.Module):\n  def __init__(self, backbone, feature_size=64, nb_class=3):\n    super(RelationalReasoning_InterIntra, self).__init__()\n    self.backbone = backbone\n\n    self.relation_head = torch.nn.Sequential(\n                             torch.nn.Linear(feature_size*2, 256),\n                             torch.nn.BatchNorm1d(256),\n                             torch.nn.LeakyReLU(),\n                             torch.nn.Linear(256, 1))\n    self.cls_head = 
torch.nn.Sequential(\n        torch.nn.Linear(feature_size*2, 256),\n        torch.nn.BatchNorm1d(256),\n        torch.nn.LeakyReLU(),\n        torch.nn.Linear(256, nb_class),\n        torch.nn.Softmax(),\n    )\n    # self.softmax = nn.Softmax()\n\n  def aggregate(self, features, K):\n    relation_pairs_list = list()\n    targets_list = list()\n    size = int(features.shape[0] / K)\n    shifts_counter=1\n    for index_1 in range(0, size*K, size):\n      for index_2 in range(index_1+size, size*K, size):\n\n        # Using the 'cat' aggregation function by default\n        pos1 = features[index_1:index_1 + size]\n        pos2 = features[index_2:index_2+size]\n        pos_pair = torch.cat([pos1,\n                              pos2], 1)  # (batch_size, fz*2)\n\n        # Shuffle without collisions by rolling the mini-batch (negatives)\n        neg1 = torch.roll(features[index_2:index_2 + size],\n                          shifts=shifts_counter, dims=0)\n        neg_pair1 = torch.cat([pos1, neg1], 1) # (batch_size, fz*2)\n\n        relation_pairs_list.append(pos_pair)\n        relation_pairs_list.append(neg_pair1)\n\n        targets_list.append(torch.ones(size, dtype=torch.float32).cuda())\n        targets_list.append(torch.zeros(size, dtype=torch.float32).cuda())\n\n        shifts_counter+=1\n        if(shifts_counter>=size):\n            shifts_counter=1 # avoid identity pairs\n    relation_pairs = torch.cat(relation_pairs_list, 0).cuda()  # K(K-1) * (batch_size, fz*2)\n    targets = torch.cat(targets_list, 0).cuda()\n    return relation_pairs, targets\n\n  def run_test(self, predict, labels):\n      correct = 0\n      pred = predict.data.max(1)[1]\n      correct = pred.eq(labels.data).cpu().sum()\n      return correct, len(labels.data)\n\n  def train(self, tot_epochs, train_loader, opt):\n    patience = opt.patience\n    early_stopping = EarlyStopping(patience, verbose=True,\n                                   
checkpoint_pth='{}/backbone_best.tar'.format(opt.ckpt_dir))\n\n    optimizer = torch.optim.Adam([\n                  {'params': self.backbone.parameters()},\n                  {'params': self.relation_head.parameters()},\n        {'params': self.cls_head.parameters()},\n    ], lr=opt.learning_rate)\n    BCE = torch.nn.BCEWithLogitsLoss()\n    c_criterion = nn.CrossEntropyLoss()\n\n    self.backbone.train()\n    self.relation_head.train()\n    self.cls_head.train()\n    epoch_max = 0\n    acc_max=0\n    for epoch in range(tot_epochs):\n\n      acc_epoch=0\n      acc_epoch_cls=0\n      loss_epoch=0\n      # the real target is discarded (unsupervised)\n      for i, (data, data_augmented0, data_augmented1, data_label, _) in enumerate(train_loader):\n        K = len(data) # tot augmentations\n        x = torch.cat(data, 0).cuda()\n        x_cut0 = torch.cat(data_augmented0, 0).cuda()\n        x_cut1 = torch.cat(data_augmented1, 0).cuda()\n        c_label = torch.cat(data_label, 0).cuda()\n      \n        optimizer.zero_grad()\n        # forward pass (backbone)\n        features = self.backbone(x)\n        features_cut0 = self.backbone(x_cut0)\n        features_cut1 = self.backbone(x_cut1)\n\n        features_cls = torch.cat([features_cut0, features_cut1], 1)\n\n        # aggregation function\n        relation_pairs, targets = self.aggregate(features, K)\n        # relation_pairs_intra, targets_intra = self.aggregate_intra(features_cut0, features_cut1, K)\n\n        # forward pass (relation head)\n        score = self.relation_head(relation_pairs).squeeze()\n        c_output = self.cls_head(features_cls)\n        correct_cls, length_cls = self.run_test(c_output, c_label)\n\n        # cross-entropy loss and backward\n        loss = BCE(score, targets)\n        loss_c = c_criterion(c_output, c_label)\n        loss+=loss_c\n\n        loss.backward()\n        optimizer.step()\n        # estimate the accuracy\n        predicted = torch.round(torch.sigmoid(score))\n        correct = 
predicted.eq(targets.view_as(predicted)).sum()\n        accuracy = (100.0 * correct / float(len(targets)))\n        acc_epoch += accuracy.item()\n        loss_epoch += loss.item()\n\n        accuracy_cls = 100. * correct_cls / length_cls\n        acc_epoch_cls += accuracy_cls.item()\n\n      acc_epoch /= len(train_loader)\n      acc_epoch_cls /= len(train_loader)\n      loss_epoch /= len(train_loader)\n\n      if (acc_epoch+acc_epoch_cls)>acc_max:\n          acc_max = (acc_epoch+acc_epoch_cls)\n          epoch_max = epoch\n\n      early_stopping((acc_epoch+acc_epoch_cls), self.backbone)\n      if early_stopping.early_stop:\n          print(\"Early stopping\")\n          break\n\n      if (epoch+1)%opt.save_freq==0:\n        print(\"[INFO] save backbone at epoch {}!\".format(epoch))\n        torch.save(self.backbone.state_dict(), '{}/backbone_{}.tar'.format(opt.ckpt_dir, epoch))\n\n      print('Epoch [{}][{}][{}] loss= {:.5f}; Epoch ACC.= {:.2f}%, CLS.= {:.2f}%, '\n            'Max ACC.= {:.1f}%, Max Epoch={}' \\\n            .format(epoch + 1, opt.model_name, opt.dataset_name,\n                    loss_epoch, acc_epoch,acc_epoch_cls, acc_max, epoch_max))\n    return acc_max, epoch_max\n\n\n\n\n"
  },
  {
    "path": "ts_classification_methods/selftime_cls/model/model_backbone.py",
    "content": "# -*- coding: utf-8 -*-\n\nimport math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass SimConv4(torch.nn.Module):\n    def __init__(self, in_channel=1, feature_size=64):\n        super(SimConv4, self).__init__()\n        self.feature_size = feature_size\n        self.name = \"conv4\"\n        self.in_channel = in_channel\n\n        self.layer1 = torch.nn.Sequential(\n            nn.Conv1d(self.in_channel, 8, 4, 2, 1, bias=False),\n            torch.nn.BatchNorm1d(8),\n          torch.nn.ReLU()\n        )\n\n        self.layer2 = torch.nn.Sequential(\n            nn.Conv1d(8, 16, 4, 2, 1, bias=False),\n            torch.nn.BatchNorm1d(16),\n          torch.nn.ReLU(),\n        )\n\n        self.layer3 = torch.nn.Sequential(\n            nn.Conv1d(16, 32, 4, 2, 1, bias=False),\n            torch.nn.BatchNorm1d(32),\n          torch.nn.ReLU(),\n        )\n\n        self.layer4 = torch.nn.Sequential(\n            nn.Conv1d(32, 64, 3, 2, 1, bias=False),\n            torch.nn.BatchNorm1d(64),\n          torch.nn.ReLU(),\n          torch.nn.AdaptiveAvgPool1d(1)\n        )\n\n        self.flatten = torch.nn.Flatten()\n\n        for m in self.modules():\n            if isinstance(m, torch.nn.Conv2d):\n                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n                m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n            elif isinstance(m, torch.nn.BatchNorm2d):\n                m.weight.data.fill_(1)\n                m.bias.data.zero_()\n            if isinstance(m, nn.Conv1d):\n                nn.init.xavier_normal_(m.weight.data)\n            #        nn.init.xavier_normal_(m.bias.data)\n            elif isinstance(m, nn.BatchNorm1d):\n                nn.init.constant_(m.weight, 1)\n                nn.init.constant_(m.bias, 0)\n            elif isinstance(m, nn.Linear):\n                nn.init.constant_(m.weight, 1)\n                nn.init.constant_(m.bias, 0)\n\n    def forward(self, x):\n        x_ = x.view(x.shape[0], 1, -1)\n\n        h = self.layer1(x_)  # (B, 1, D)->(B, 8, D/2)\n        h = self.layer2(h)  # (B, 8, D/2)->(B, 16, D/4)\n        h = self.layer3(h)  # (B, 16, D/4)->(B, 32, D/8)\n        h = self.layer4(h)  # (B, 32, D/8)->(B, 64, 1)\n        h = self.flatten(h)\n        h = F.normalize(h, dim=1)\n        return h\n"
  },
  {
    "path": "ts_classification_methods/selftime_cls/optim/__init__.py",
    "content": "# -*- coding: utf-8 -*-\n"
  },
  {
    "path": "ts_classification_methods/selftime_cls/optim/pretrain.py",
    "content": "# -*- coding: utf-8 -*-\n\nimport torch\nimport utils.transforms as transforms\nfrom dataloader.ucr2018 import *\nimport torch.utils.data as data\nfrom model.model_RelationalReasoning import *\nfrom model.model_backbone import SimConv4\n\ndef pretrain_IntraSampleRel(x_train, y_train, opt):\n    K = opt.K\n    batch_size = opt.batch_size  # 128 has been used in the paper\n    tot_epochs = opt.epochs  # 400 has been used in the paper\n    feature_size = opt.feature_size\n    ckpt_dir = opt.ckpt_dir\n\n    prob = 0.2  # Transform Probability\n    raw = transforms.Raw()\n    cutout = transforms.Cutout(sigma=0.1, p=prob)\n    jitter = transforms.Jitter(sigma=0.2, p=prob)\n    scaling = transforms.Scaling(sigma=0.4, p=prob)\n    magnitude_warp = transforms.MagnitudeWrap(sigma=0.3, knot=4, p=prob)\n    time_warp = transforms.TimeWarp(sigma=0.2, knot=8, p=prob)\n    window_slice = transforms.WindowSlice(reduce_ratio=0.8, p=prob)\n    window_warp = transforms.WindowWarp(window_ratio=0.3, scales=(0.5, 2), p=prob)\n\n    transforms_list = {'jitter': [jitter],\n                       'cutout': [cutout],\n                       'scaling': [scaling],\n                       'magnitude_warp': [magnitude_warp],\n                       'time_warp': [time_warp],\n                       'window_slice': [window_slice],\n                       'window_warp': [window_warp],\n                       'G0': [jitter, magnitude_warp, window_slice],\n                       'G1': [jitter, time_warp, window_slice],\n                       'G2': [jitter, time_warp, window_slice, window_warp, cutout],\n                       'none': [raw]}\n\n    transforms_targets = list()\n    for name in opt.aug_type:\n        for item in transforms_list[name]:\n            transforms_targets.append(item)\n\n    train_transform = transforms.Compose(transforms_targets)\n\n    if '2C' in opt.class_type:\n        cut_piece = transforms.CutPiece2C(sigma=opt.piece_size)\n        nb_class=2\n    elif 
'3C' in opt.class_type:\n        cut_piece = transforms.CutPiece3C(sigma=opt.piece_size)\n        nb_class=3\n    elif '4C' in opt.class_type:\n        cut_piece = transforms.CutPiece4C(sigma=opt.piece_size)\n        nb_class=4\n    elif '5C' in opt.class_type:\n        cut_piece = transforms.CutPiece5C(sigma=opt.piece_size)\n        nb_class = 5\n    elif '6C' in opt.class_type:\n        cut_piece = transforms.CutPiece6C(sigma=opt.piece_size)\n        nb_class = 6\n    elif '7C' in opt.class_type:\n        cut_piece = transforms.CutPiece7C(sigma=opt.piece_size)\n        nb_class = 7\n    elif '8C' in opt.class_type:\n        cut_piece = transforms.CutPiece8C(sigma=opt.piece_size)\n        nb_class = 8\n\n    tensor_transform = transforms.ToTensor()\n\n    backbone = SimConv4().cuda()\n    model = RelationalReasoning_Intra(backbone, feature_size, nb_class).cuda()\n\n    train_set = MultiUCR2018_Intra(data=x_train, targets=y_train, K=K,\n                               transform=train_transform, transform_cut=cut_piece,\n                               totensor_transform=tensor_transform)\n\n    train_loader = torch.utils.data.DataLoader(train_set,\n                                               batch_size=batch_size,\n                                               shuffle=True)\n    torch.save(model.backbone.state_dict(), '{}/backbone_init.tar'.format(ckpt_dir))\n    acc_max, epoch_max = model.train(tot_epochs=tot_epochs, train_loader=train_loader, opt=opt)\n\n    torch.save(model.backbone.state_dict(), '{}/backbone_last.tar'.format(ckpt_dir))\n\n    return acc_max, epoch_max\n\n\ndef pretrain_InterSampleRel(x_train, y_train, opt):\n    K = opt.K\n    batch_size = opt.batch_size  # 128 has been used in the paper\n    tot_epochs = opt.epochs  # 400 has been used in the paper\n    feature_size = opt.feature_size\n    ckpt_dir = opt.ckpt_dir\n\n    prob = 0.2  # Transform Probability\n    raw = transforms.Raw()\n    cutout = transforms.Cutout(sigma=0.1, p=prob)\n    
jitter = transforms.Jitter(sigma=0.2, p=prob)\n    scaling = transforms.Scaling(sigma=0.4, p=prob)\n    magnitude_warp = transforms.MagnitudeWrap(sigma=0.3, knot=4, p=prob)\n    time_warp = transforms.TimeWarp(sigma=0.2, knot=8, p=prob)\n    window_slice = transforms.WindowSlice(reduce_ratio=0.8, p=prob)\n    window_warp = transforms.WindowWarp(window_ratio=0.3, scales=(0.5, 2), p=prob)\n\n    transforms_list = {'jitter': [jitter],\n                       'cutout': [cutout],\n                       'scaling': [scaling],\n                       'magnitude_warp': [magnitude_warp],\n                       'time_warp': [time_warp],\n                       'window_slice': [window_slice],\n                       'window_warp': [window_warp],\n                       'G0': [jitter, magnitude_warp, window_slice],\n                       'G1': [jitter, time_warp, window_slice],\n                       'G2': [jitter, time_warp, window_slice, window_warp, cutout],\n                       'none': [raw]}\n\n    transforms_targets = list()\n    for name in opt.aug_type:\n        for item in transforms_list[name]:\n            transforms_targets.append(item)\n    train_transform = transforms.Compose(transforms_targets + [transforms.ToTensor()])\n\n    backbone = SimConv4().cuda()\n    model = RelationalReasoning(backbone, feature_size).cuda()\n\n    train_set = MultiUCR2018(data=x_train, targets=y_train, K=K, transform=train_transform)\n    train_loader = torch.utils.data.DataLoader(train_set,\n                                               batch_size=batch_size,\n                                               shuffle=True)\n    torch.save(model.backbone.state_dict(), '{}/backbone_init.tar'.format(ckpt_dir))\n    acc_max, epoch_max = model.train(tot_epochs=tot_epochs, train_loader=train_loader, opt=opt)\n\n    torch.save(model.backbone.state_dict(), '{}/backbone_last.tar'.format(ckpt_dir))\n\n    return acc_max, epoch_max\n\n\ndef pretrain_SelfTime(x_train, y_train, opt, 
in_channel=1):\n    K = opt.K\n    batch_size = opt.batch_size  # 128 has been used in the paper\n    tot_epochs = opt.epochs  # 400 has been used in the paper\n    feature_size = opt.feature_size\n    ckpt_dir = opt.ckpt_dir\n\n    prob = 0.2  # Transform Probability\n    cutout = transforms.Cutout(sigma=0.1, p=prob)\n    jitter = transforms.Jitter(sigma=0.2, p=prob)\n    scaling = transforms.Scaling(sigma=0.4, p=prob)\n    magnitude_warp = transforms.MagnitudeWrap(sigma=0.3, knot=4, p=prob)\n    time_warp = transforms.TimeWarp(sigma=0.2, knot=8, p=prob)\n    window_slice = transforms.WindowSlice(reduce_ratio=0.8, p=prob)\n    window_warp = transforms.WindowWarp(window_ratio=0.3, scales=(0.5, 2), p=prob)\n\n    transforms_list = {'jitter': jitter,\n                       'cutout': cutout,\n                       'scaling': scaling,\n                       'magnitude_warp': magnitude_warp,\n                       'time_warp': time_warp,\n                       'window_slice': window_slice,\n                       'window_warp': window_warp,\n                       'G0': [jitter, magnitude_warp, window_slice],\n                       'G1': [jitter, time_warp, window_slice],\n                       'G2': [jitter, time_warp, window_slice, window_warp, cutout],\n                       'none': []}\n\n    transforms_targets = [transforms_list[name] for name in opt.aug_type]\n    train_transform = transforms.Compose(transforms_targets)\n    tensor_transform = transforms.ToTensor()\n\n    if '2C' in opt.class_type:\n        cut_piece = transforms.CutPiece2C(sigma=opt.piece_size)\n        nb_class=2\n    elif '3C' in opt.class_type:\n        cut_piece = transforms.CutPiece3C(sigma=opt.piece_size)\n        nb_class=3\n    elif '4C' in opt.class_type:\n        cut_piece = transforms.CutPiece4C(sigma=opt.piece_size)\n        nb_class=4\n    elif '5C' in opt.class_type:\n        cut_piece = transforms.CutPiece5C(sigma=opt.piece_size)\n        nb_class = 5\n    elif '6C' in 
opt.class_type:\n        cut_piece = transforms.CutPiece6C(sigma=opt.piece_size)\n        nb_class = 6\n    elif '7C' in opt.class_type:\n        cut_piece = transforms.CutPiece7C(sigma=opt.piece_size)\n        nb_class = 7\n    elif '8C' in opt.class_type:\n        cut_piece = transforms.CutPiece8C(sigma=opt.piece_size)\n        nb_class = 8\n\n    backbone = SimConv4(in_channel).cuda()\n    model = RelationalReasoning_InterIntra(backbone, feature_size, nb_class).cuda()\n\n    train_set = MultiUCR2018_InterIntra(data=x_train, targets=y_train, K=K,\n                                        transform=train_transform, transform_cut=cut_piece,\n                                        totensor_transform=tensor_transform)\n    \n    train_loader = torch.utils.data.DataLoader(train_set,\n                                               batch_size=batch_size,\n                                               shuffle=True)\n    torch.save(model.backbone.state_dict(), '{}/backbone_init.tar'.format(ckpt_dir))\n    acc_max, epoch_max = model.train(tot_epochs=tot_epochs, train_loader=train_loader, opt=opt)\n\n    torch.save(model.backbone.state_dict(), '{}/backbone_last.tar'.format(ckpt_dir))\n\n    return acc_max, epoch_max, model.backbone.state_dict()\n\n\n\n"
  },
  {
    "path": "ts_classification_methods/selftime_cls/optim/pytorchtools.py",
    "content": "# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport torch\n\nclass EarlyStopping:\n    \"\"\"Early stops the training if validation loss doesn't improve after a given patience.\"\"\"\n    def __init__(self, patience=50, verbose=False, delta=0, checkpoint_pth='chechpoint.pt'):\n        \"\"\"\n        Args:\n            patience (int): How long to wait after last time validation loss improved.\n                            Default: 7\n            verbose (bool): If True, prints a message for each validation loss improvement.\n                            Default: False\n            delta (float): Minimum change in the monitored quantity to qualify as an improvement.\n                            Default: 0\n        \"\"\"\n        self.patience = patience\n        self.verbose = verbose\n        self.counter = 0\n        self.best_score = None\n        self.early_stop = False\n        self.val_loss_min = np.Inf\n        self.delta = delta\n        self.checkpoint_pth = checkpoint_pth\n\n    def __call__(self, val_loss, model):\n\n        score = val_loss\n\n        if self.best_score is None:\n            self.best_score = score\n            # self.save_checkpoint(val_loss, model, self.checkpoint_pth)\n        elif score <= self.best_score + self.delta:\n            self.counter += 1\n            print(f'EarlyStopping counter: {self.counter} out of {self.patience}')\n            if self.counter >= self.patience:\n                self.early_stop = True\n        else:\n            self.best_score = score\n            self.save_checkpoint(val_loss, model, self.checkpoint_pth)\n            self.counter = 0\n\n    def save_checkpoint(self, val_loss, model, checkpoint_pth):\n        '''Saves model when validation loss decrease.'''\n        if model is not None:\n            if self.verbose:\n                print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}).  
Saving model ...')\n            torch.save(model.state_dict(), checkpoint_pth)\n            self.val_loss_min = val_loss"
  },
  {
    "path": "ts_classification_methods/selftime_cls/optim/train.py",
    "content": "# -*- coding: utf-8 -*-\n\n\nimport torch\nimport utils.transforms as transforms\nfrom dataloader.ucr2018 import UCR2018\nimport torch.utils.data as data\nfrom optim.pytorchtools import EarlyStopping\nfrom model.model_backbone import SimConv4\nimport utils.transforms as transforms_ts\n\n\ndef supervised_train(x_train, y_train, x_val, y_val, x_test, y_test, nb_class, opt):\n    # construct data loader\n    # Those are the transformations used in the paper\n    prob = 0.2  # Transform Probability\n    cutout = transforms_ts.Cutout(sigma=0.1, p=prob)\n    jitter = transforms_ts.Jitter(sigma=0.2, p=prob)  # CIFAR10\n    scaling = transforms_ts.Scaling(sigma=0.4, p=prob)\n    magnitude_warp = transforms_ts.MagnitudeWrap(sigma=0.3, knot=4, p=prob)\n    time_warp = transforms_ts.TimeWarp(sigma=0.2, knot=8, p=prob)\n    window_slice = transforms_ts.WindowSlice(reduce_ratio=0.8, p=prob)\n    window_warp = transforms_ts.WindowWarp(window_ratio=0.3, scales=(0.5, 2), p=prob)\n\n    transforms_list = {'jitter': [jitter],\n                       'cutout': [cutout],\n                       'scaling': [scaling],\n                       'magnitude_warp': [magnitude_warp],\n                       'time_warp': [time_warp],\n                       'window_slice': [window_slice],\n                       'window_warp': [window_warp],\n                       'G0': [jitter, magnitude_warp, window_slice],\n                       'G1': [jitter, time_warp, window_slice],\n                       'G2': [jitter, time_warp, window_slice, window_warp, cutout],\n                       'none': []}\n\n    transforms_targets = list()\n    for name in opt.aug_type:\n        for item in transforms_list[name]:\n            transforms_targets.append(item)\n\n    train_transform = transforms_ts.Compose(transforms_targets + [transforms_ts.ToTensor()])\n    transform_lineval = transforms.Compose([transforms.ToTensor()])\n\n    train_set_lineval = UCR2018(data=x_train, targets=y_train, 
transform=train_transform)\n    val_set_lineval = UCR2018(data=x_val, targets=y_val, transform=transform_lineval)\n    test_set_lineval = UCR2018(data=x_test, targets=y_test, transform=transform_lineval)\n\n    train_loader_lineval = torch.utils.data.DataLoader(train_set_lineval, batch_size=128, shuffle=True)\n    val_loader_lineval = torch.utils.data.DataLoader(val_set_lineval, batch_size=128, shuffle=False)\n    test_loader_lineval = torch.utils.data.DataLoader(test_set_lineval, batch_size=128, shuffle=False)\n\n    # loading the saved backbone\n    backbone_lineval = SimConv4().cuda()  # defining a raw backbone model\n\n    # 64 are the number of output features in the backbone, and 10 the number of classes\n    linear_layer = torch.nn.Linear(opt.feature_size, nb_class).cuda()\n    optimizer = torch.optim.Adam([{'params': backbone_lineval.parameters()},\n                  {'params': linear_layer.parameters()}], lr=opt.learning_rate)\n\n    CE = torch.nn.CrossEntropyLoss()\n\n    early_stopping = EarlyStopping(opt.patience_test, verbose=True,\n                                   checkpoint_pth='{}/backbone_best.tar'.format(opt.ckpt_dir))\n\n    torch.save(backbone_lineval.state_dict(), '{}/backbone_init.tar'.format(opt.ckpt_dir))\n\n    best_acc = 0\n    best_epoch = 0\n\n    print('Supervised Train')\n    for epoch in range(opt.epochs_test):\n        backbone_lineval.train()\n        linear_layer.train()\n\n        acc_trains = list()\n        for i, (data, target) in enumerate(train_loader_lineval):\n            optimizer.zero_grad()\n            data = data.cuda()\n            target = target.cuda()\n\n            output = backbone_lineval(data)\n            output = linear_layer(output)\n            loss = CE(output, target)\n            loss.backward()\n            optimizer.step()\n            # estimate the accuracy\n            prediction = output.argmax(-1)\n            correct = prediction.eq(target.view_as(prediction)).sum()\n            accuracy = 
(100.0 * correct / len(target))\n            acc_trains.append(accuracy.item())\n\n        print('[Train-{}][{}] loss: {:.5f}; \\t Acc: {:.2f}%' \\\n              .format(epoch + 1, opt.model_name, loss.item(), sum(acc_trains) / len(acc_trains)))\n\n        acc_vals = list()\n        acc_tests = list()\n        backbone_lineval.eval()\n        linear_layer.eval()\n        with torch.no_grad():\n            for i, (data, target) in enumerate(val_loader_lineval):\n                data = data.cuda()\n                target = target.cuda()\n\n                output = backbone_lineval(data).detach()\n                output = linear_layer(output)\n                # estimate the accuracy\n                prediction = output.argmax(-1)\n                correct = prediction.eq(target.view_as(prediction)).sum()\n                accuracy = (100.0 * correct / len(target))\n                acc_vals.append(accuracy.item())\n\n            val_acc = sum(acc_vals) / len(acc_vals)\n            if val_acc >= best_acc:\n                best_acc = val_acc\n                best_epoch = epoch\n                for i, (data, target) in enumerate(test_loader_lineval):\n                    data = data.cuda()\n                    target = target.cuda()\n\n                    output = backbone_lineval(data).detach()\n                    output = linear_layer(output)\n                    # estimate the accuracy\n                    prediction = output.argmax(-1)\n                    correct = prediction.eq(target.view_as(prediction)).sum()\n                    accuracy = (100.0 * correct / len(target))\n                    acc_tests.append(accuracy.item())\n\n                test_acc = sum(acc_tests) / len(acc_tests)\n\n        print('[Test-{}] Val ACC:{:.2f}%, Best Test ACC.: {:.2f}% in Epoch {}'.format(\n            epoch, val_acc, test_acc, best_epoch))\n        early_stopping(val_acc, backbone_lineval)\n        if early_stopping.early_stop:\n            print(\"Early stopping\")\n           
 break\n    torch.save(backbone_lineval.state_dict(), '{}/backbone_last.tar'.format(opt.ckpt_dir))\n\n    return test_acc, best_epoch\n\n\n\n\n"
  },
  {
    "path": "ts_classification_methods/selftime_cls/scripts/ucr.sh",
    "content": "python -u train_ssl.py --dataset_name Herring --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name ProximalPhalanxOutlineAgeGroup --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name UWaveGestureLibraryAll --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name FiftyWords --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name PLAID --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name SmoothSubspace --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name Lightning7 --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name Mallat --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name FordB --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name FaceFour --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name Fungi --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name EOGHorizontalSignal --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name ChlorineConcentration --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name ECGFiveDays --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name Computers --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name InsectEPGRegularTrain --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name DodgerLoopDay --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name Wafer --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name FaceAll --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name MiddlePhalanxOutlineAgeGroup --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name Phoneme --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name SonyAIBORobotSurface2 
--model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name DistalPhalanxOutlineAgeGroup --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name GunPointOldVersusYoung --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name CBF --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name Haptics --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name CinCECGTorso --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name ECG5000 --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name MedicalImages --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name ShakeGestureWiimoteZ --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name Rock --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name SmallKitchenAppliances --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name BeetleFly --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name WordSynonyms --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name ItalyPowerDemand --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name LargeKitchenAppliances --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name Yoga --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name HouseTwenty --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name FordA --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name Meat --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name ToeSegmentation2 --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name GestureMidAirD2 --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name NonInvasiveFetalECGThorax1 --model_name SelfTime --random_seed 42\npython -u train_ssl.py 
--dataset_name Adiac --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name UWaveGestureLibraryY --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name Crop --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name BirdChicken --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name AllGestureWiimoteY --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name ElectricDevices --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name ShapeletSim --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name UWaveGestureLibraryX --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name CricketZ --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name OSULeaf --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name DistalPhalanxOutlineCorrect --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name FreezerRegularTrain --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name Ham --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name AllGestureWiimoteX --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name MixedShapesRegularTrain --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name MiddlePhalanxOutlineCorrect --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name Plane --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name PigArtPressure --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name SyntheticControl --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name Fish --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name MelbournePedestrian --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name ShapesAll --model_name SelfTime 
--random_seed 42\npython -u train_ssl.py --dataset_name InsectEPGSmallTrain --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name Symbols --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name PhalangesOutlinesCorrect --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name AllGestureWiimoteZ --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name CricketY --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name MixedShapesSmallTrain --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name TwoLeadECG --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name Chinatown --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name ToeSegmentation1 --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name HandOutlines --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name Worms --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name SemgHandSubjectCh2 --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name Wine --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name ACSF1 --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name ProximalPhalanxOutlineCorrect --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name DodgerLoopWeekend --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name DodgerLoopGame --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name DistalPhalanxTW --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name GesturePebbleZ2 --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name PigAirwayPressure --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name Beef --model_name SelfTime --random_seed 42\npython -u train_ssl.py 
--dataset_name Strawberry --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name BME --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name MoteStrain --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name MiddlePhalanxTW --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name FacesUCR --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name GunPoint --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name PickupGestureWiimoteZ --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name GestureMidAirD1 --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name ProximalPhalanxTW --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name SwedishLeaf --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name Lightning2 --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name OliveOil --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name Coffee --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name SemgHandGenderCh2 --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name Car --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name InsectWingbeatSound --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name GunPointMaleVersusFemale --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name RefrigerationDevices --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name ArrowHead --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name InlineSkate --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name DiatomSizeReduction --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name GunPointAgeSpan --model_name SelfTime --random_seed 
42\npython -u train_ssl.py --dataset_name FreezerSmallTrain --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name WormsTwoClass --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name GesturePebbleZ1 --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name EthanolLevel --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name ScreenType --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name TwoPatterns --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name PowerCons --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name StarLightCurves --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name Earthquakes --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name UWaveGestureLibraryZ --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name SemgHandMovementCh2 --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name SonyAIBORobotSurface1 --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name ECG200 --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name GestureMidAirD3 --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name PigCVP --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name EOGVerticalSignal --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name UMD --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name NonInvasiveFetalECGThorax2 --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name CricketX --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_name Trace --model_name SelfTime --random_seed 42\n"
  },
  {
    "path": "ts_classification_methods/selftime_cls/train_ssl.py",
    "content": "# -*- coding: utf-8 -*-\n\nimport numpy as np\nfrom evaluation.eval_ssl import evaluation\nfrom utils.utils import get_config_from_json\nimport torch\nimport argparse\nfrom optim.pretrain import *\nimport datetime\nimport random\nfrom data.preprocessing import *\nimport os\nimport sys\nsys.path.append('..')\n\n\ndef parse_option():\n    parser = argparse.ArgumentParser('argument for training')\n    parser.add_argument('--save_freq', type=int, default=200,\n                        help='save frequency')\n    parser.add_argument('--batch_size', type=int, default=128,\n                        help='batch_size')\n    # Bigger is better.\n    parser.add_argument('--K', type=int, default=16,\n                        help='Number of augmentation for each sample')\n\n    parser.add_argument('--feature_size', type=int, default=64,\n                        help='feature_size')\n    parser.add_argument('--num_workers', type=int, default=16,\n                        help='num of workers to use')\n    parser.add_argument('--epochs', type=int, default=400,  # 400\n                        help='number of training epochs')\n    parser.add_argument('--patience', type=int, default=400,\n                        help='training patience')\n    parser.add_argument('--aug_type', type=str,\n                        default='none', help='Augmentation type')\n    parser.add_argument('--piece_size', type=float, default=0.2,\n                        help='piece size for time series piece sampling')\n    parser.add_argument('--class_type', type=str,\n                        default='3C', help='Classification type')\n\n    # optimization\n    parser.add_argument('--learning_rate', type=float, default=0.01,\n                        help='learning rate')\n    # model dataset\n    parser.add_argument('--dataset_name', type=str, default='CricketX',\n                        help='dataset')\n    parser.add_argument('--ucr_path', type=str, default='/dev_data/zzj/hzy/datasets/UCR',\n     
                   help='Data root for dataset.')\n    parser.add_argument('--ckpt_dir', type=str, default='./ckpt/',\n                        help='Data path for checkpoint.')\n    # method\n    parser.add_argument('--backbone', type=str, default='SimConv4')\n    parser.add_argument('--model_name', type=str, default='InterSample',\n                        choices=['InterSample', 'IntraTemporal', 'SelfTime'], help='choose method')\n    parser.add_argument('--config_dir', type=str,\n                        default='./config', help='The Configuration Dir')\n    parser.add_argument('--gpus', type=str, default='0', help='selected gpu')\n    parser.add_argument('--random_seed', type=int,\n                        default=42, help='for reproduction purpose')\n    opt = parser.parse_args()\n    return opt\n\n\nif __name__ == \"__main__\":\n\n    opt = parse_option()\n    exp = 'linear_eval'\n\n    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus\n    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n    # set seed\n    random.seed(opt.random_seed)\n    np.random.seed(opt.random_seed)\n    torch.manual_seed(opt.random_seed)\n    torch.cuda.manual_seed(opt.random_seed)\n    torch.cuda.manual_seed_all(opt.random_seed)\n    aug1 = ['magnitude_warp']\n    aug2 = ['time_warp']\n\n    # use uwave config\n    config_dict = get_config_from_json('{}/{}_config.json'.format(\n        opt.config_dir, 'UWaveGestureLibraryAll'))\n\n    opt.class_type = config_dict['class_type']\n    opt.piece_size = config_dict['piece_size']\n\n    if opt.model_name == 'InterSample':\n        model_paras = 'none'\n    else:\n        model_paras = '{}_{}'.format(opt.piece_size, opt.class_type)\n\n    if aug1 == aug2:\n        opt.aug_type = [aug1]\n    elif type(aug1) is list:\n        opt.aug_type = aug1 + aug2\n    else:\n        opt.aug_type = [aug1, aug2]\n\n    log_dir = './log/{}/{}/{}/{}/{}'.format(\n        exp, opt.dataset_name, opt.model_name, '_'.join(opt.aug_type), 
model_paras)\n\n    if not os.path.exists(log_dir):\n        os.makedirs(log_dir)\n\n    file2print_detail_train = open(\"{}/train_detail.log\".format(log_dir), 'a+')\n    print(datetime.datetime.now(), file=file2print_detail_train)\n    print(\"Dataset\\tTrain\\tTest\\tDimension\\tClass\\tSeed\\tAcc_max\\tEpoch_max\",\n          file=file2print_detail_train)\n    file2print_detail_train.flush()\n\n    sum_dataset, sum_target, nb_class = load_data(\n        opt.ucr_path, opt.dataset_name)\n    sum_dataset = np.expand_dims(sum_dataset, 2)\n    train_datasets, train_targets, val_datasets, val_targets, test_datasets, test_targets = k_fold(\n        sum_dataset, sum_target)\n\n    accu = []\n    if not os.path.exists(opt.ckpt_dir):\n        os.makedirs(opt.ckpt_dir)\n\n    print('[INFO] Running at:', opt.dataset_name)\n    save_path = './ucr_result.csv'\n    for i, x_train in enumerate(train_datasets):\n        print('{} fold start training!'.format(i))\n        y_train = train_targets[i]\n        x_val = val_datasets[i]\n        y_val = val_targets[i]\n        x_test = test_datasets[i]\n        y_test = test_targets[i]\n\n        x_train, x_val, x_test = fill_nan_value(x_train, x_val, x_test)\n        x_train, x_val, x_test = normalize_per_series(\n            x_train), normalize_per_series(x_val), normalize_per_series(x_test)\n        if opt.model_name == 'InterSample':\n            acc_max, epoch_max = pretrain_InterSampleRel(x_train, y_train, opt)\n        elif 'IntraTemporal' in opt.model_name:\n            acc_max, epoch_max = pretrain_IntraSampleRel(x_train, y_train, opt)\n        elif 'SelfTime' in opt.model_name:\n            acc_max, epoch_max, model_state_dict = pretrain_SelfTime(\n                x_train, y_train, opt)\n            acc_test, epoch_max_point = evaluation(x_train, y_train, x_val, y_val, x_test, y_test,\n                                                   nb_class=nb_class, ckpt=None, opt=opt, ckpt_tosave=None, my_state=model_state_dict)\n\n    
    accu.append(acc_test)\n\n    accu = np.array(accu)\n    acc_mean = np.mean(accu)\n    acc_std = np.std(accu)\n\n    if os.path.exists(save_path):\n        result_form = pd.read_csv(save_path)\n    else:\n        result_form = pd.DataFrame(columns=['target', 'accuracy', 'std'])\n\n    result_form = result_form.append(\n        {'target': opt.dataset_name, 'accuracy': '%.4f' % acc_mean, 'std': '%.4f' % acc_std}, ignore_index=True)\n    result_form = result_form.iloc[:, -3:]\n    result_form.to_csv(save_path)\n"
  },
  {
    "path": "ts_classification_methods/selftime_cls/utils/__init__.py",
    "content": "# -*- coding: utf-8 -*-\n"
  },
  {
    "path": "ts_classification_methods/selftime_cls/utils/augmentation.py",
    "content": "import numpy as np\nfrom tqdm import tqdm\nimport utils.helper as hlp\n\n\n\ndef slidewindow(ts, horizon=.2, stride=0.2):\n    xf = []\n    yf = []\n    for i in range(0, ts.shape[0], int(stride * ts.shape[0])):\n        horizon1 = int(horizon * ts.shape[0])\n        if (i + horizon1 + horizon1 <= ts.shape[0]):\n            xf.append(ts[i:i + horizon1,0])\n            yf.append(ts[i + horizon1:i + horizon1 + horizon1, 0])\n\n    xf = np.asarray(xf)\n    yf = np.asarray(yf)\n\n    return xf, yf\n\n\n\n\ndef cutout(ts, perc=.1):\n    seq_len = ts.shape[0]\n    new_ts = ts.copy()\n    win_len = int(perc * seq_len)\n    start = np.random.randint(0, seq_len-win_len-1)\n    end = start + win_len\n    start = max(0, start)\n    end = min(end, seq_len)\n    # print(\"[INFO] start={}, end={}\".format(start, end))\n    new_ts[start:end, ...] = 0\n    # return new_ts, ts[start:end, ...]\n    return new_ts\n\n\ndef cut_piece2C(ts, perc=.1):\n    seq_len = ts.shape[0]\n    win_class = seq_len/(2*2)\n\n    if perc<1:\n        win_len = int(perc * seq_len)\n    else:\n        win_len = perc\n\n    start1 = np.random.randint(0, seq_len-win_len)\n    end1 = start1 + win_len\n    start2 = np.random.randint(0, seq_len - win_len)\n    end2 = start2 + win_len\n\n    if abs(start1-start2)<(win_class):\n        label=0\n    else:\n        label=1\n    return ts[start1:end1, ...], ts[start2:end2, ...], label\n\n\ndef cut_piece3C(ts, perc=.1):\n    seq_len = ts.shape[0]\n    win_class = seq_len/(2*3)\n\n    if perc<1:\n        win_len = int(perc * seq_len)\n    else:\n        win_len = perc\n\n    start1 = np.random.randint(0, seq_len-win_len)\n    end1 = start1 + win_len\n    start2 = np.random.randint(0, seq_len - win_len)\n    end2 = start2 + win_len\n\n    if abs(start1-start2)<(win_class):\n        label=0\n    elif abs(start1-start2)<(2*win_class):\n        label=1\n    else:\n        label=2\n\n    return ts[start1:end1, ...], ts[start2:end2, ...], label\n\n\ndef 
cut_piece4C(ts, perc=.1):\n    seq_len = ts.shape[0]\n    win_class = seq_len / (2 * 4)\n\n    if perc < 1:\n        win_len = int(perc * seq_len)\n    else:\n        win_len = perc\n\n    start1 = np.random.randint(0, seq_len - win_len)\n    end1 = start1 + win_len\n    start2 = np.random.randint(0, seq_len - win_len)\n    end2 = start2 + win_len\n\n    if abs(start1 - start2) < (win_class):\n        label = 0\n    elif abs(start1 - start2) < (2 * win_class):\n        label = 1\n    elif abs(start1 - start2) < (3 * win_class):\n        label = 2\n    else:\n        label = 3\n\n    return ts[start1:end1, ...], ts[start2:end2, ...], label\n\n\ndef cut_piece5C(ts, perc=.1):\n    seq_len = ts.shape[0]\n    win_class = seq_len / (2 * 5)\n\n    if perc < 1:\n        win_len = int(perc * seq_len)\n    else:\n        win_len = perc\n\n    start1 = np.random.randint(0, seq_len - win_len)\n    end1 = start1 + win_len\n    start2 = np.random.randint(0, seq_len - win_len)\n    end2 = start2 + win_len\n\n    if abs(start1 - start2) < (win_class):\n        label = 0\n    elif abs(start1 - start2) < (2 * win_class):\n        label = 1\n    elif abs(start1 - start2) < (3 * win_class):\n        label = 2\n    elif abs(start1 - start2) < (4 * win_class):\n        label = 3\n    else:\n        label = 4\n\n    return ts[start1:end1, ...], ts[start2:end2, ...], label\n\n\ndef cut_piece6C(ts, perc=.1):\n    seq_len = ts.shape[0]\n    win_class = seq_len / (2 * 6)\n\n    if perc < 1:\n        win_len = int(perc * seq_len)\n    else:\n        win_len = perc\n\n    start1 = np.random.randint(0, seq_len - win_len)\n    end1 = start1 + win_len\n    start2 = np.random.randint(0, seq_len - win_len)\n    end2 = start2 + win_len\n\n    if abs(start1 - start2) < (win_class):\n        label = 0\n    elif abs(start1 - start2) < (2 * win_class):\n        label = 1\n    elif abs(start1 - start2) < (3 * win_class):\n        label = 2\n    elif abs(start1 - start2) < (4 * win_class):\n        label 
= 3\n    elif abs(start1 - start2) < (5 * win_class):\n        label = 4\n    else:\n        label = 5\n\n    return ts[start1:end1, ...], ts[start2:end2, ...], label\n\n\ndef cut_piece7C(ts, perc=.1):\n    seq_len = ts.shape[0]\n    win_class = seq_len / (2 * 7)\n\n    if perc < 1:\n        win_len = int(perc * seq_len)\n    else:\n        win_len = perc\n\n    start1 = np.random.randint(0, seq_len - win_len)\n    end1 = start1 + win_len\n    start2 = np.random.randint(0, seq_len - win_len)\n    end2 = start2 + win_len\n\n    if abs(start1 - start2) < (win_class):\n        label = 0\n    elif abs(start1 - start2) < (2 * win_class):\n        label = 1\n    elif abs(start1 - start2) < (3 * win_class):\n        label = 2\n    elif abs(start1 - start2) < (4 * win_class):\n        label = 3\n    elif abs(start1 - start2) < (5 * win_class):\n        label = 4\n    elif abs(start1 - start2) < (6 * win_class):\n        label = 5\n    else:\n        label = 6\n\n    return ts[start1:end1, ...], ts[start2:end2, ...], label\n\n\ndef cut_piece8C(ts, perc=.1):\n    seq_len = ts.shape[0]\n    win_class = seq_len / (2 * 8)\n\n    if perc < 1:\n        win_len = int(perc * seq_len)\n    else:\n        win_len = perc\n\n    start1 = np.random.randint(0, seq_len - win_len)\n    end1 = start1 + win_len\n    start2 = np.random.randint(0, seq_len - win_len)\n    end2 = start2 + win_len\n\n    if abs(start1 - start2) < (win_class):\n        label = 0\n    elif abs(start1 - start2) < (2 * win_class):\n        label = 1\n    elif abs(start1 - start2) < (3 * win_class):\n        label = 2\n    elif abs(start1 - start2) < (4 * win_class):\n        label = 3\n    elif abs(start1 - start2) < (5 * win_class):\n        label = 4\n    elif abs(start1 - start2) < (6 * win_class):\n        label = 5\n    elif abs(start1 - start2) < (7 * win_class):\n        label = 6\n    else:\n        label = 7\n\n    return ts[start1:end1, ...], ts[start2:end2, ...], label\n\n\ndef jitter(x, sigma=0.03):\n    
# https://arxiv.org/pdf/1706.00527.pdf\n    return x + np.random.normal(loc=0., scale=sigma, size=x.shape)\n\ndef scaling(x, sigma=0.1):\n    # https://arxiv.org/pdf/1706.00527.pdf\n    factor = np.random.normal(loc=1., scale=sigma, size=(x.shape[0],x.shape[2]))\n    return np.multiply(x, factor[:,np.newaxis,:])\n\ndef rotation(x):\n    flip = np.random.choice([-1, 1], size=(x.shape[0],x.shape[2]))\n    rotate_axis = np.arange(x.shape[2])\n    np.random.shuffle(rotate_axis)\n    return flip[:,np.newaxis,:] * x[:,:,rotate_axis]\n\ndef scaling_s(x, sigma=0.1, plot=False):\n    # https://arxiv.org/pdf/1706.00527.pdf\n    factor = np.random.normal(loc=1., scale=sigma, size=(1, x.shape[1]))\n    x_ = np.multiply(x, factor[:, :])\n\n    if plot:\n        hlp.plot1d(x, x_, save_file='aug_examples/scal.png')\n\n    return x_\n\ndef rotation_s(x, plot=False):\n    flip = np.random.choice([-1], size=(1, x.shape[1]))\n    rotate_axis = np.arange(x.shape[1])\n    np.random.shuffle(rotate_axis)\n    x_ = flip[:, :] * x[:, rotate_axis]\n    if plot:\n        hlp.plot1d(x, x_, save_file='aug_examples/rotation_s.png')\n    return x_\n\ndef rotation2d(x, sigma=0.2):\n    thetas = np.random.normal(loc=0, scale=sigma, size=(x.shape[0]))\n    c = np.cos(thetas)\n    s = np.sin(thetas)\n\n    ret = np.zeros_like(x)\n    for i, pat in enumerate(x):\n        rot = np.array(((c[i], -s[i]), (s[i], c[i])))\n        ret[i] = np.dot(pat, rot)\n    return ret\n\ndef permutation(x, max_segments=5, seg_mode=\"equal\"):\n    orig_steps = np.arange(x.shape[1])\n\n    num_segs = np.random.randint(1, max_segments, size=(x.shape[0]))\n\n    ret = np.zeros_like(x)\n    for i, pat in enumerate(x):\n        if num_segs[i] > 1:\n            if seg_mode == \"random\":\n                split_points = np.random.choice(x.shape[1]-2, num_segs[i]-1, replace=False)\n                split_points.sort()\n                splits = np.split(orig_steps, split_points)\n            else:\n                splits = 
np.array_split(orig_steps, num_segs[i])\n            warp = np.concatenate(np.random.permutation(splits)).ravel()\n            ret[i] = pat[warp]\n        else:\n            ret[i] = pat\n    return ret\n\ndef magnitude_warp(x, sigma=0.2, knot=4):\n    from scipy.interpolate import CubicSpline\n    orig_steps = np.arange(x.shape[1])\n    \n    random_warps = np.random.normal(loc=1.0, scale=sigma, size=(x.shape[0], knot+2, x.shape[2]))\n    warp_steps = (np.ones((x.shape[2],1))*(np.linspace(0, x.shape[1]-1., num=knot+2))).T\n\n    ret = np.zeros_like(x)\n    for i, pat in enumerate(x):\n\n        li = []\n        for dim in range(x.shape[2]):\n            li.append(CubicSpline(warp_steps[:, dim], random_warps[i, :, dim])(orig_steps))\n        warper = np.array(li).T\n\n        ret[i] = pat * warper\n\n    return ret\n\n\ndef magnitude_warp_s(x, sigma=0.2, knot=4, plot=False):\n    from scipy.interpolate import CubicSpline\n    orig_steps = np.arange(x.shape[0])\n\n    random_warps = np.random.normal(loc=1.0, scale=sigma, size=(1, knot + 2, x.shape[1]))\n    warp_steps = (np.ones((x.shape[1], 1)) * (np.linspace(0, x.shape[0] - 1., num=knot + 2))).T\n\n    li = []\n    for dim in range(x.shape[1]):\n        li.append(CubicSpline(warp_steps[:, dim], random_warps[0, :, dim])(orig_steps))\n    warper = np.array(li).T\n\n    x_ = x * warper\n\n    if plot:\n        hlp.plot1d(x, x_, save_file='aug_examples/magnitude_warp_s.png')\n    return x_\n\n\ndef time_warp(x, sigma=0.2, knot=4):\n    from scipy.interpolate import CubicSpline\n    orig_steps = np.arange(x.shape[1])\n    \n    random_warps = np.random.normal(loc=1.0, scale=sigma, size=(x.shape[0], knot+2, x.shape[2]))\n    warp_steps = (np.ones((x.shape[2],1))*(np.linspace(0, x.shape[1]-1., num=knot+2))).T\n    \n    ret = np.zeros_like(x)\n    for i, pat in enumerate(x):\n        for dim in range(x.shape[2]):\n            time_warp = CubicSpline(warp_steps[:,dim], warp_steps[:,dim] * 
random_warps[i,:,dim])(orig_steps)\n            scale = (x.shape[1]-1)/time_warp[-1]\n            ret[i,:,dim] = np.interp(orig_steps, np.clip(scale*time_warp, 0, x.shape[1]-1), pat[:,dim]).T\n    return ret\n\n\ndef time_warp_s(x, sigma=0.2, knot=4, plot=False):\n    from scipy.interpolate import CubicSpline\n    orig_steps = np.arange(x.shape[0])\n\n    random_warps = np.random.normal(loc=1.0, scale=sigma, size=(1, knot + 2, x.shape[1]))\n    warp_steps = (np.ones((x.shape[1], 1)) * (np.linspace(0, x.shape[0] - 1., num=knot + 2))).T\n\n    ret = np.zeros_like(x)\n    for dim in range(x.shape[1]):\n        time_warp = CubicSpline(warp_steps[:, dim],\n                                warp_steps[:, dim] * random_warps[0, :, dim])(orig_steps)\n        scale = (x.shape[0] - 1) / time_warp[-1]\n        ret[:, dim] = np.interp(orig_steps, np.clip(scale * time_warp, 0, x.shape[0] - 1),\n                                   x[:, dim]).T\n    if plot:\n        hlp.plot1d(x, ret, save_file='aug_examples/time_warp_s.png')\n    return ret\n\n\ndef window_slice(x, reduce_ratio=0.9):\n    # https://halshs.archives-ouvertes.fr/halshs-01357973/document\n    target_len = np.ceil(reduce_ratio*x.shape[1]).astype(int)\n    if target_len >= x.shape[1]:\n        return x\n    starts = np.random.randint(low=0, high=x.shape[1]-target_len, size=(x.shape[0])).astype(int)\n    ends = (target_len + starts).astype(int)\n    \n    ret = np.zeros_like(x)\n    for i, pat in enumerate(x):\n        for dim in range(x.shape[2]):\n            ret[i,:,dim] = np.interp(np.linspace(0, target_len, num=x.shape[1]), np.arange(target_len), pat[starts[i]:ends[i],dim]).T\n    return ret\n\n\ndef window_slice_s(x, reduce_ratio=0.9):\n    # https://halshs.archives-ouvertes.fr/halshs-01357973/document\n    target_len = np.ceil(reduce_ratio * x.shape[0]).astype(int)\n    if target_len >= x.shape[0]:\n        return x\n    starts = np.random.randint(low=0, high=x.shape[0] - target_len, size=(1)).astype(int)\n    
ends = (target_len + starts).astype(int)\n\n    ret = np.zeros_like(x)\n    for dim in range(x.shape[1]):\n        ret[:, dim] = np.interp(np.linspace(0, target_len, num=x.shape[0]), np.arange(target_len),\n                                   x[starts[0]:ends[0], dim]).T\n    return ret\n\n\ndef window_warp(x, window_ratio=0.1, scales=[0.5, 2.]):\n    # https://halshs.archives-ouvertes.fr/halshs-01357973/document\n    warp_scales = np.random.choice(scales, x.shape[0])\n    warp_size = np.ceil(window_ratio*x.shape[1]).astype(int)\n    window_steps = np.arange(warp_size)\n        \n    window_starts = np.random.randint(low=1, high=x.shape[1]-warp_size-1, size=(x.shape[0])).astype(int)\n    window_ends = (window_starts + warp_size).astype(int)\n            \n    ret = np.zeros_like(x)\n    for i, pat in enumerate(x):\n        for dim in range(x.shape[2]):\n            start_seg = pat[:window_starts[i],dim]\n            window_seg = np.interp(np.linspace(0, warp_size-1, num=int(warp_size*warp_scales[i])), window_steps, pat[window_starts[i]:window_ends[i],dim])\n            end_seg = pat[window_ends[i]:,dim]\n            warped = np.concatenate((start_seg, window_seg, end_seg))                \n            ret[i,:,dim] = np.interp(np.arange(x.shape[1]), np.linspace(0, x.shape[1]-1., num=warped.size), warped).T\n    return ret\n\n\ndef window_warp_s(x, window_ratio=0.1, scales=[0.5, 2.]):\n    # https://halshs.archives-ouvertes.fr/halshs-01357973/document\n    warp_scales = np.random.choice(scales, 1)\n    warp_size = np.ceil(window_ratio * x.shape[0]).astype(int)\n    window_steps = np.arange(warp_size)\n\n    window_starts = np.random.randint(low=1, high=x.shape[0] - warp_size - 1, size=(1)).astype(int)\n    window_ends = (window_starts + warp_size).astype(int)\n\n    ret = np.zeros_like(x)\n    pat=x\n    for dim in range(x.shape[1]):\n        start_seg = pat[:window_starts[0], dim]\n        window_seg = np.interp(np.linspace(0, warp_size - 1,\n                         
                  num=int(warp_size * warp_scales[0])), window_steps,\n                               pat[window_starts[0]:window_ends[0], dim])\n        end_seg = pat[window_ends[0]:, dim]\n        warped = np.concatenate((start_seg, window_seg, end_seg))\n        ret[:, dim] = np.interp(np.arange(x.shape[0]), np.linspace(0, x.shape[0] - 1., num=warped.size),\n                                   warped).T\n    return ret\n\ndef spawner(x, labels, sigma=0.05, verbose=0):\n    # https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6983028/\n\n    import utils.dtw as dtw\n    random_points = np.random.randint(low=1, high=x.shape[1]-1, size=x.shape[0])\n    window = np.ceil(x.shape[1] / 10.).astype(int)\n    orig_steps = np.arange(x.shape[1])\n    l = np.argmax(labels, axis=1) if labels.ndim > 1 else labels\n\n    ret = np.zeros_like(x)\n    for i, pat in enumerate(tqdm(x)):\n        # guarentees that same one isnt selected\n        choices = np.delete(np.arange(x.shape[0]), i)\n        # remove ones of different classes\n        choices = np.where(l[choices] == l[i])[0]\n        if choices.size > 0:\n            random_sample = x[np.random.choice(choices)]\n            # SPAWNER splits the path into two randomly\n            path1 = dtw.dtw(pat[:random_points[i]], random_sample[:random_points[i]], dtw.RETURN_PATH, slope_constraint=\"symmetric\", window=window)\n            path2 = dtw.dtw(pat[random_points[i]:], random_sample[random_points[i]:], dtw.RETURN_PATH, slope_constraint=\"symmetric\", window=window)\n            combined = np.concatenate((np.vstack(path1), np.vstack(path2+random_points[i])), axis=1)\n            if verbose:\n                print(random_points[i])\n                dtw_value, cost, DTW_map, path = dtw.dtw(pat, random_sample,\n                                                         return_flag = dtw.RETURN_ALL,\n                                                         slope_constraint=slope_constraint,\n                                                 
        window=window)\n                dtw.draw_graph1d(cost, DTW_map, path, pat, random_sample)\n                dtw.draw_graph1d(cost, DTW_map, combined, pat, random_sample)\n            mean = np.mean([pat[combined[0]], random_sample[combined[1]]], axis=0)\n            for dim in range(x.shape[2]):\n                ret[i,:,dim] = np.interp(orig_steps, np.linspace(0, x.shape[1]-1., num=mean.shape[0]), mean[:,dim]).T\n        else:\n            print(\"There is only one pattern of class %d, skipping pattern average\"%l[i])\n            ret[i,:] = pat\n    return jitter(ret, sigma=sigma)\n\ndef wdba(x, labels, batch_size=6, slope_constraint=\"symmetric\", use_window=True):\n    # https://ieeexplore.ieee.org/document/8215569\n\n    import utils.dtw as dtw\n\n    if use_window:\n        window = np.ceil(x.shape[1] / 10.).astype(int)\n    else:\n        window = None\n    orig_steps = np.arange(x.shape[1])\n    l = np.argmax(labels, axis=1) if labels.ndim > 1 else labels\n\n    ret = np.zeros_like(x)\n    for i in tqdm(range(ret.shape[0])):\n        # get the same class as i\n        choices = np.where(l == l[i])[0]\n        if choices.size > 0:\n            # pick random intra-class pattern\n            k = min(choices.size, batch_size)\n            random_prototypes = x[np.random.choice(choices, k, replace=False)]\n\n            # calculate dtw between all\n            dtw_matrix = np.zeros((k, k))\n            for p, prototype in enumerate(random_prototypes):\n                for s, sample in enumerate(random_prototypes):\n                    if p == s:\n                        dtw_matrix[p, s] = 0.\n                    else:\n                        dtw_matrix[p, s] = dtw.dtw(prototype, sample, dtw.RETURN_VALUE, slope_constraint=slope_constraint, window=window)\n\n            # get medoid\n            medoid_id = np.argsort(np.sum(dtw_matrix, axis=1))[0]\n            nearest_order = np.argsort(dtw_matrix[medoid_id])\n            medoid_pattern = 
random_prototypes[medoid_id]\n\n            # start weighted DBA\n            average_pattern = np.zeros_like(medoid_pattern)\n            weighted_sums = np.zeros((medoid_pattern.shape[0]))\n            for nid in nearest_order:\n                if nid == medoid_id or dtw_matrix[medoid_id, nearest_order[1]] == 0.:\n                    average_pattern += medoid_pattern\n                    weighted_sums += np.ones_like(weighted_sums)\n                else:\n                    path = dtw.dtw(medoid_pattern, random_prototypes[nid], dtw.RETURN_PATH, slope_constraint=slope_constraint, window=window)\n                    dtw_value = dtw_matrix[medoid_id, nid]\n                    warped = random_prototypes[nid, path[1]]\n                    weight = np.exp(np.log(0.5)*dtw_value/dtw_matrix[medoid_id, nearest_order[1]])\n                    average_pattern[path[0]] += weight * warped\n                    weighted_sums[path[0]] += weight\n\n            ret[i,:] = average_pattern / weighted_sums[:,np.newaxis]\n        else:\n            print(\"There is only one pattern of class %d, skipping pattern average\"%l[i])\n            ret[i,:] = x[i]\n    return ret\n\n# Proposed\n\ndef random_guided_warp(x, labels, slope_constraint=\"symmetric\", use_window=True, dtw_type=\"normal\"):\n    import utils.dtw as dtw\n\n    if use_window:\n        window = np.ceil(x.shape[1] / 10.).astype(int)\n    else:\n        window = None\n    orig_steps = np.arange(x.shape[1])\n    l = np.argmax(labels, axis=1) if labels.ndim > 1 else labels\n\n    ret = np.zeros_like(x)\n    for i, pat in enumerate(tqdm(x)):\n        # guarentees that same one isnt selected\n        choices = np.delete(np.arange(x.shape[0]), i)\n        # remove ones of different classes\n        choices = np.where(l[choices] == l[i])[0]\n        if choices.size > 0:\n            # pick random intra-class pattern\n            random_prototype = x[np.random.choice(choices)]\n\n            if dtw_type == \"shape\":\n            
    path = dtw.shape_dtw(random_prototype, pat, dtw.RETURN_PATH, slope_constraint=slope_constraint, window=window)\n            else:\n                path = dtw.dtw(random_prototype, pat, dtw.RETURN_PATH, slope_constraint=slope_constraint, window=window)\n\n            # Time warp\n            warped = pat[path[1]]\n            for dim in range(x.shape[2]):\n                ret[i,:,dim] = np.interp(orig_steps, np.linspace(0, x.shape[1]-1., num=warped.shape[0]), warped[:,dim]).T\n        else:\n            print(\"There is only one pattern of class %d, skipping timewarping\"%l[i])\n            ret[i,:] = pat\n    return ret\n\ndef discriminative_guided_warp(x, labels, batch_size=6, slope_constraint=\"symmetric\", use_window=True, dtw_type=\"normal\", use_variable_slice=True):\n    import utils.dtw as dtw\n\n    if use_window:\n        window = np.ceil(x.shape[1] / 10.).astype(int)\n    else:\n        window = None\n    orig_steps = np.arange(x.shape[1])\n    l = np.argmax(labels, axis=1) if labels.ndim > 1 else labels\n\n    positive_batch = np.ceil(batch_size / 2).astype(int)\n    negative_batch = np.floor(batch_size / 2).astype(int)\n\n    ret = np.zeros_like(x)\n    warp_amount = np.zeros(x.shape[0])\n    for i, pat in enumerate(tqdm(x)):\n        # guarentees that same one isnt selected\n        choices = np.delete(np.arange(x.shape[0]), i)\n\n        # remove ones of different classes\n        positive = np.where(l[choices] == l[i])[0]\n        negative = np.where(l[choices] != l[i])[0]\n\n        if positive.size > 0 and negative.size > 0:\n            pos_k = min(positive.size, positive_batch)\n            neg_k = min(negative.size, negative_batch)\n            positive_prototypes = x[np.random.choice(positive, pos_k, replace=False)]\n            negative_prototypes = x[np.random.choice(negative, neg_k, replace=False)]\n\n            # vector embedding and nearest prototype in one\n            pos_aves = np.zeros((pos_k))\n            neg_aves = 
np.zeros((pos_k))\n            if dtw_type == \"shape\":\n                for p, pos_prot in enumerate(positive_prototypes):\n                    for ps, pos_samp in enumerate(positive_prototypes):\n                        if p != ps:\n                            pos_aves[p] += (1./(pos_k-1.))*dtw.shape_dtw(pos_prot, pos_samp, dtw.RETURN_VALUE, slope_constraint=slope_constraint, window=window)\n                    for ns, neg_samp in enumerate(negative_prototypes):\n                        neg_aves[p] += (1./neg_k)*dtw.shape_dtw(pos_prot, neg_samp, dtw.RETURN_VALUE, slope_constraint=slope_constraint, window=window)\n                selected_id = np.argmax(neg_aves - pos_aves)\n                path = dtw.shape_dtw(positive_prototypes[selected_id], pat, dtw.RETURN_PATH, slope_constraint=slope_constraint, window=window)\n            else:\n                for p, pos_prot in enumerate(positive_prototypes):\n                    for ps, pos_samp in enumerate(positive_prototypes):\n                        if p != ps:\n                            pos_aves[p] += (1./(pos_k-1.))*dtw.dtw(pos_prot, pos_samp, dtw.RETURN_VALUE, slope_constraint=slope_constraint, window=window)\n                    for ns, neg_samp in enumerate(negative_prototypes):\n                        neg_aves[p] += (1./neg_k)*dtw.dtw(pos_prot, neg_samp, dtw.RETURN_VALUE, slope_constraint=slope_constraint, window=window)\n                selected_id = np.argmax(neg_aves - pos_aves)\n                path = dtw.dtw(positive_prototypes[selected_id], pat, dtw.RETURN_PATH, slope_constraint=slope_constraint, window=window)\n\n            # Time warp\n            warped = pat[path[1]]\n            warp_path_interp = np.interp(orig_steps, np.linspace(0, x.shape[1]-1., num=warped.shape[0]), path[1])\n            warp_amount[i] = np.sum(np.abs(orig_steps-warp_path_interp))\n            for dim in range(x.shape[2]):\n                ret[i,:,dim] = np.interp(orig_steps, np.linspace(0, x.shape[1]-1., 
num=warped.shape[0]), warped[:,dim]).T\n        else:\n            print(\"There is only one pattern of class %d\"%l[i])\n            ret[i,:] = pat\n            warp_amount[i] = 0.\n    if use_variable_slice:\n        max_warp = np.max(warp_amount)\n        if max_warp == 0:\n            # unchanged\n            ret = window_slice(ret, reduce_ratio=0.95)\n        else:\n            for i, pat in enumerate(ret):\n                # Variable Sllicing\n                ret[i] = window_slice(pat[np.newaxis,:,:], reduce_ratio=0.95+0.05*warp_amount[i]/max_warp)[0]\n    return ret\n"
  },
  {
    "path": "ts_classification_methods/selftime_cls/utils/datasets.py",
    "content": "def nb_dims(dataset):\n    if dataset in [\"unipen1a\", \"unipen1b\", \"unipen1c\"]:\n        return 2\n    return 1\n\ndef nb_classes(dataset):\n    if dataset=='MFPT':\n        return 15\n    if dataset == 'XJTU':\n        return 15\n    if dataset == \"CricketX\":\n        return 12 #300\n    if dataset == \"UWaveGestureLibraryAll\":\n        return 8 # 945\n    if dataset == \"DodgerLoopDay\":\n        return 7\n    if dataset == \"InsectWingbeatSound\":\n        return 11\n"
  },
  {
    "path": "ts_classification_methods/selftime_cls/utils/helper.py",
    "content": "import numpy as np\n\ndef plot2d(x, y, x2=None, y2=None, x3=None, y3=None, xlim=(-1, 1), ylim=(-1, 1), save_file=\"\"):\n    import matplotlib.pyplot as plt\n\n    plt.figure(figsize=(4, 4))\n    plt.plot(x, y)\n    if x2 is not None and y2 is not None:\n        plt.plot(x2, y2)\n    if x3 is not None and y3 is not None:\n        plt.plot(x3, y3)\n    plt.xlim(xlim)\n    plt.ylim(ylim)\n    plt.tight_layout()\n    if save_file:\n        plt.savefig(save_file, \"\")\n    else:\n        plt.show()\n    return\n\ndef plot1d(x, x2=None, x3=None, ylim=(-1, 1), save_file=\"\"):\n    import matplotlib.pyplot as plt\n\n    plt.figure(figsize=(6, 3))\n    steps = np.arange(x.shape[0])\n    plt.plot(steps, x)\n    if x2 is not None:\n        plt.plot(steps, x2)\n    if x3 is not None:\n        plt.plot(steps, x3)\n    plt.xlim(0, x.shape[0])\n    plt.ylim(ylim)\n    plt.tight_layout()\n    if save_file:\n        plt.savefig(save_file)\n    else:\n        plt.show()\n    return"
  },
  {
    "path": "ts_classification_methods/selftime_cls/utils/transforms.py",
    "content": "import random\nimport torch\nfrom utils.augmentation import *\n\n\nclass Raw:\n    def __init__(self):\n        pass\n\n    def __call__(self, data):\n        return data\n\n\nclass CutPiece2C:\n    def __init__(self, sigma):\n        self.sigma = sigma\n\n    def __call__(self, data):\n        return self.forward(data)\n\n    def forward(self, data):\n\n        return cut_piece2C(data, self.sigma)\n\n\nclass CutPiece3C:\n    def __init__(self, sigma):\n        self.sigma = sigma\n\n    def __call__(self, data):\n        return self.forward(data)\n\n    def forward(self, data):\n\n        return cut_piece3C(data, self.sigma)\n\n\nclass CutPiece4C:\n    def __init__(self, sigma):\n        self.sigma = sigma\n\n    def __call__(self, data):\n        return self.forward(data)\n\n    def forward(self, data):\n\n        return cut_piece4C(data, self.sigma)\n\n\nclass CutPiece5C:\n    def __init__(self, sigma):\n        self.sigma = sigma\n\n    def __call__(self, data):\n        return self.forward(data)\n\n    def forward(self, data):\n\n        return cut_piece5C(data, self.sigma)\n\n\nclass CutPiece6C:\n    def __init__(self, sigma):\n        self.sigma = sigma\n\n    def __call__(self, data):\n        return self.forward(data)\n\n    def forward(self, data):\n\n        return cut_piece6C(data, self.sigma)\n\n\nclass CutPiece7C:\n    def __init__(self, sigma):\n        self.sigma = sigma\n\n    def __call__(self, data):\n        return self.forward(data)\n\n    def forward(self, data):\n\n        return cut_piece7C(data, self.sigma)\n\n\nclass CutPiece8C:\n    def __init__(self, sigma):\n        self.sigma = sigma\n\n    def __call__(self, data):\n        return self.forward(data)\n\n    def forward(self, data):\n\n        return cut_piece8C(data, self.sigma)\n\n\nclass Jitter:\n    def __init__(self, sigma, p):\n        self.sigma = sigma\n        self.p = p\n\n    def __call__(self, data):\n        # print('### Jitter')\n\n        if random.random() 
< self.p:\n            return self.forward(data)\n        return data\n\n    def forward(self, data):\n\n        return jitter(data, sigma=self.sigma)\n\n\nclass Scaling:\n    def __init__(self, sigma, p):\n        self.sigma = sigma\n        self.p = p\n\n    def __call__(self, data):\n        # print('### Scaling')\n\n        if random.random() < self.p:\n            return self.forward(data)\n\n        return data\n\n    def forward(self, data):\n        return scaling_s(data, sigma=self.sigma)\n\n\nclass Cutout:\n    def __init__(self, sigma, p):\n        self.sigma = sigma\n        self.p = p\n\n    def __call__(self, data):\n        # print('### Cutout')\n\n        if random.random() < self.p:\n            return self.forward(data)\n        return data\n\n    def forward(self, data):\n        return cutout(data, self.sigma)\n\n\nclass MagnitudeWrap:\n    def __init__(self, sigma, knot, p):\n        self.sigma = sigma\n        self.knot = knot\n        self.p = p\n\n    def __call__(self, data):\n        # print('### MagnitudeWrap')\n\n        if random.random() < self.p:\n            return self.forward(data)\n\n        return data\n\n    def forward(self, data):\n        return magnitude_warp_s(data, sigma=self.sigma, knot=self.knot)\n\n\nclass TimeWarp:\n    def __init__(self, sigma, knot, p):\n        self.sigma = sigma\n        self.knot = knot\n        self.p = p\n\n    def __call__(self, data):\n        if random.random() < self.p:\n            return self.forward(data)\n\n        return data\n\n    def forward(self, data):\n        return time_warp_s(data, sigma=self.sigma, knot=self.knot)\n\n\nclass WindowSlice:\n    def __init__(self, reduce_ratio, p):\n        self.reduce_ratio = reduce_ratio\n        self.p = p\n\n    def __call__(self, data):\n        if random.random() < self.p:\n            return self.forward(data)\n\n        return data\n\n    def forward(self, data):\n        return window_slice_s(data, 
reduce_ratio=self.reduce_ratio)\n\n\nclass WindowWarp:\n    def __init__(self, window_ratio, scales, p):\n        self.window_ratio = window_ratio\n        self.scales = scales\n        self.p = p\n\n    def __call__(self, data):\n        if random.random() < self.p:\n            return self.forward(data)\n\n        return data\n\n    def forward(self, data):\n        return window_warp_s(data, window_ratio=self.window_ratio, scales=self.scales)\n\n\nclass ToTensor:\n    '''\n    Attributes\n    ----------\n    basic : convert numpy to PyTorch tensor\n\n    Methods\n    -------\n    forward(img=input_image)\n        Convert HWC OpenCV image into CHW PyTorch Tensor\n    '''\n    def __init__(self, basic=False):\n        self.basic = basic\n        self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n    def __call__(self, img):\n        return self.forward(img)\n\n    def forward(self, img):\n        '''\n        Parameters\n        ----------\n        img : opencv/numpy image\n\n        Returns\n        -------\n        Torch tensor\n            BGR -> RGB, [0, 255] -> [0, 1]\n        '''\n        ret = torch.from_numpy(img).type(torch.FloatTensor).to(self.device)\n        return ret\n\n\nclass Compose:\n    def __init__(self, transforms):\n        self.transforms = transforms\n\n    def __call__(self, img):\n        return self.forward(img)\n\n    def forward(self, img):\n        for t in self.transforms:\n            img = t(img)\n\n        return img\n"
  },
  {
    "path": "ts_classification_methods/selftime_cls/utils/utils.py",
    "content": "# -*- coding: utf-8 -*-\n\nimport json\n\ndef get_config_from_json(json_file):\n    \"\"\"\n    Get the config from a json file\n    :param json_file:\n    :return: config(dictionary)\n    \"\"\"\n    # parse the configurations from the config json file provided\n    with open(json_file, 'r') as config_file:\n        config_dict = json.load(config_file)\n\n    return config_dict\n"
  },
  {
    "path": "ts_classification_methods/selftime_cls/utils/utils_plot.py",
    "content": "# -*- coding: utf-8 -*-\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\n\ndef show_samples(X_train, y_train, dataset_name, figname='', num_shown=5):\n    '''\n\n    :param X_train:\n    :param y_train:\n    :param shown_num:\n    :return:\n    '''\n    num_cls = np.max(y_train)+1\n\n    samples={}\n    for cls in range(num_cls):\n        idx = np.where(y_train==cls)[0]\n        # np.random.shuffle(idx)\n        samples[cls] = X_train[idx[:num_shown]]\n\n    plt.figure(figsize=(num_shown*3, num_cls))\n    for i in range(1, num_cls+1):\n        for j in range(1, num_shown+1):\n            plt.subplot(num_cls, num_shown, j+(i-1)*num_shown)\n            plt.plot(samples[i-1][j-1])\n    plt.tight_layout()\n\n    if not os.path.exists('Samples'):\n        os.makedirs('Samples')\n    plt.savefig('Samples/{}_{}.png'.format(dataset_name, figname))\n    plt.close()\n\n\n"
  },
  {
    "path": "ts_classification_methods/test/__init__.py",
    "content": ""
  },
  {
    "path": "ts_classification_methods/test/train_uea_test.py",
    "content": "import argparse\nimport time\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader\n\nfrom data.dataloader import UEADataset\nfrom data.preprocessing import fill_nan_value, normalize_train_val_test, load_UEA, \\\n    normalize_uea_set\nfrom tsm_utils import build_model, set_seed, build_loss, evaluate, get_all_datasets, save_cls_result\n\nif __name__ == '__main__':\n    uea_all = ['ArticularyWordRecognition', 'AtrialFibrillation', 'BasicMotions', 'CharacterTrajectories',\n               'Cricket', 'DuckDuckGeese', 'EigenWorms', 'Epilepsy', 'EthanolConcentration', 'ERing',\n               'FaceDetection', 'FingerMovements', 'HandMovementDirection', 'Handwriting',\n               'Heartbeat', 'InsectWingbeat', 'JapaneseVowels', 'Libras', 'LSST', 'MotorImagery',\n               'NATOPS', 'PenDigits', 'PEMS-SF', 'PhonemeSpectra', 'RacketSports', 'SelfRegulationSCP1',\n               'SelfRegulationSCP2', 'SpokenArabicDigits', 'StandWalkJump', 'UWaveGestureLibrary']\n    # uea_all = ['StandWalkJump', 'UWaveGestureLibrary']\n    for dataset in uea_all:\n        # dataset = 'BasicMotions'\n        parser = argparse.ArgumentParser()\n        # Base setup\n        parser.add_argument('--backbone', type=str, default='fcn', help='encoder backbone, fcn or dilated')\n        parser.add_argument('--task', type=str, default='classification', help='classification or reconstruction')\n        parser.add_argument('--random_seed', type=int, default=42, help='shuffle seed')\n\n        # Dataset setup\n        parser.add_argument('--dataset', type=str, default=dataset, help='dataset(in ucr)')\n        parser.add_argument('--dataroot', type=str, default='/SSD/lz/Multivariate2018_arff', help='path of UCR folder')\n        parser.add_argument('--num_classes', type=int, default=0, help='number of class')\n        parser.add_argument('--normalize_way', type=str, default='single', help='single or train_set')\n        parser.add_argument('--seq_len', 
type=int, default=46, help='seq_len')\n        parser.add_argument('--input_size', type=int, default=1, help='input_size')\n\n        # Dilated Convolution setup\n        parser.add_argument('--depth', type=int, default=3, help='depth of the dilated conv model')\n        parser.add_argument('--in_channels', type=int, default=1, help='input data channel')\n        parser.add_argument('--embedding_channels', type=int, default=40, help='mid layer channel')\n        parser.add_argument('--reduced_size', type=int, default=160, help='number of channels after Global max Pool')\n        parser.add_argument('--out_channels', type=int, default=320, help='number of channels after linear layer')\n        parser.add_argument('--kernel_size', type=int, default=3, help='convolution kernel size')\n\n        # training setup\n        parser.add_argument('--loss', type=str, default='cross_entropy', help='loss function')\n        parser.add_argument('--optimizer', type=str, default='adam', help='optimizer')\n        parser.add_argument('--lr', type=float, default=0.001, help='learning rate')\n        parser.add_argument('--weight_decay', type=float, default=0.0, help='weight decay')\n        parser.add_argument('--batch_size', type=int, default=128, help='(16, 128) larger batch size on the big dataset, ')\n        parser.add_argument('--epoch', type=int, default=20, help='training epoch')\n        parser.add_argument('--mode', type=str, default='directly_cls', help='train mode, default pretrain')\n        parser.add_argument('--save_dir', type=str, default='/SSD/lz/time_tsm/result_tsm')\n        parser.add_argument('--save_csv_name', type=str, default='ex2_test_all_uea_0530_')\n        parser.add_argument('--continue_training', type=int, default=0, help='continue training')\n        parser.add_argument('--cuda', type=str, default='cuda:0')\n\n        # Decoder setup\n        parser.add_argument('--decoder_backbone', type=str, default='rnn', help='backbone of the decoder (rnn or 
fcn)')\n\n        # classifier setup\n        parser.add_argument('--classifier', type=str, default='nonlinear', help='type of classifier(linear or nonlinear)')\n        parser.add_argument('--classifier_input', type=int, default=128, help='input dim of the classifiers')\n        parser.add_argument('--classifier_embedding', type=int, default=128,\n                            help='embedding dim of the non linear classifier')\n\n        # fintune setup\n        parser.add_argument('--source_dataset', type=str, default=None, help='source dataset of the pretrained model')\n        parser.add_argument('--transfer_strategy', type=str, default='classification',\n                            help='classification or reconstruction')\n        # parser.add_argument('--direct_train')\n\n        args = parser.parse_args()\n\n        device = torch.device(args.cuda if torch.cuda.is_available() else \"cpu\")\n        set_seed(args)\n\n        # sum_dataset, sum_target, num_classes = build_dataset(args)\n        sum_dataset, sum_target, num_classes = load_UEA(args.dataroot, args.dataset)\n        args.num_classes = num_classes\n        args.seq_len = sum_dataset.shape[1]\n        args.input_size = sum_dataset.shape[2]\n        # print(\"test: sum_dataset.shape = \", sum_dataset.shape)\n        if sum_dataset.shape[0] * 0.6 < args.batch_size:\n            args.batch_size = 16\n\n        model, classifier = build_model(args)\n        model, classifier = model.to(device), classifier.to(device)\n        loss = build_loss(args).to(device)\n        model_init_state = model.state_dict()\n        classifier_init_state = classifier.state_dict()\n        # print(\"model = \", model)\n\n        if args.optimizer == 'adam':\n            optimizer = torch.optim.Adam([{'params': model.parameters()}, {'params': classifier.parameters()}],\n                                         lr=args.lr, weight_decay=args.weight_decay)\n        elif args.optimizer == 'sgd':\n            optimizer = 
torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)\n\n        if args.mode == 'directly_cls':\n            print('start finetune on {}'.format(args.dataset))\n\n            train_datasets, train_targets, val_datasets, val_targets, test_datasets, test_targets = get_all_datasets(\n                sum_dataset, sum_target)\n\n            losses = []\n            test_accuracies = []\n            train_time = 0.0\n            end_val_epochs = []\n            for i, train_dataset in enumerate(train_datasets):\n                t = time.time()\n                model.load_state_dict(model_init_state)\n                classifier.load_state_dict(classifier_init_state)\n                print('{} fold start training and evaluate'.format(i))\n\n                train_target = train_targets[i]\n                val_dataset = val_datasets[i]\n                val_target = val_targets[i]\n\n                test_dataset = test_datasets[i]\n                test_target = test_targets[i]\n\n                train_dataset, val_dataset, test_dataset = fill_nan_value(train_dataset, val_dataset, test_dataset)\n                # print(train_dataset.shape, val_dataset.shape, test_dataset.shape)\n                if test_dataset.shape[0] < args.batch_size:\n                    args.batch_size = args.batch_size // 2\n\n                if args.normalize_way == 'single':\n                    train_dataset = normalize_uea_set(train_dataset)\n                    val_dataset = normalize_uea_set(val_dataset)\n                    test_dataset = normalize_uea_set(test_dataset)\n                else:\n                    train_dataset, val_dataset, test_dataset = normalize_train_val_test(train_dataset, val_dataset,\n                                                                                        test_dataset)\n\n                train_set = UEADataset(torch.from_numpy(train_dataset).type(torch.FloatTensor).to(device),\n                                       
torch.from_numpy(train_target).type(torch.FloatTensor).to(device).to(torch.int64))\n                val_set = UEADataset(torch.from_numpy(val_dataset).type(torch.FloatTensor).to(device),\n                                     torch.from_numpy(val_target).type(torch.FloatTensor).to(device).to(torch.int64))\n                test_set = UEADataset(torch.from_numpy(test_dataset).type(torch.FloatTensor).to(device),\n                                      torch.from_numpy(test_target).type(torch.FloatTensor).to(device).to(torch.int64))\n\n                train_loader = DataLoader(train_set, batch_size=args.batch_size, num_workers=0, drop_last=True)\n                val_loader = DataLoader(val_set, batch_size=args.batch_size, num_workers=0)\n                test_loader = DataLoader(test_set, batch_size=args.batch_size, num_workers=0)\n\n                train_loss = []\n                train_accuracy = []\n                num_steps = args.epoch // args.batch_size\n\n                last_loss = float('inf')\n                stop_count = 0\n                increase_count = 0\n\n                test_accuracy = 0\n                min_val_loss = float('inf')\n                end_val_epoch = 0\n\n                num_steps = train_set.__len__() // args.batch_size\n                # print(\"test, args.batch_size = \", args.batch_size, \", num_steps = \", num_steps, train_set.__len__())\n                for epoch in range(args.epoch):\n                    # early stopping in finetune\n                    if stop_count == 50 or increase_count == 50:\n                        print('model convergent at epoch {}, early stopping'.format(epoch))\n                        break\n\n                    epoch_train_loss = 0\n                    epoch_train_acc = 0\n                    model.train()\n                    classifier.train()\n                    for x, y in train_loader:\n                        # print(\"type 2 = \", type(x))\n                        optimizer.zero_grad()\n         
               pred = model(x)\n                        pred = classifier(pred)\n\n                        step_loss = loss(pred, y)\n                        step_loss.backward()\n                        optimizer.step()\n\n                        epoch_train_loss += step_loss.item()\n                        epoch_train_acc += torch.sum(torch.argmax(pred.data, axis=1) == y) / len(y)\n\n                    epoch_train_loss /= num_steps\n                    epoch_train_acc /= num_steps\n\n                    model.eval()\n                    classifier.eval()\n                    val_loss, val_accu = evaluate(val_loader, model, classifier, loss, device)\n                    if min_val_loss > val_loss:\n                        min_val_loss = val_loss\n                        end_val_epoch = epoch\n                        test_loss, test_accuracy = evaluate(test_loader, model, classifier, loss, device)\n\n                    if epoch % 100 == 0:\n                        print(\n                            \"epoch : {}, train loss: {} , train accuracy : {}, \\nval loss : {}, val accuracy : {}, \\ntest loss : {}, test accuracy : {}\".format(\n                                epoch, epoch_train_loss, epoch_train_acc, val_loss, val_accu, test_loss, test_accuracy))\n\n                    if abs(last_loss - val_loss) <= 1e-4:\n                        stop_count += 1\n                    else:\n                        stop_count = 0\n\n                    if val_loss > last_loss:\n                        increase_count += 1\n                    else:\n                        increase_count = 0\n\n                    last_loss = val_loss\n                test_accuracies.append(test_accuracy)\n                end_val_epochs.append(end_val_epoch)\n                t = time.time() - t\n                train_time += t\n\n                print('{} fold finish training'.format(i))\n\n            test_accuracies = torch.Tensor(test_accuracies)\n            end_val_epochs = 
np.array(end_val_epochs)\n            save_cls_result(args, test_accu=torch.mean(test_accuracies), test_std=torch.std(test_accuracies),\n                            train_time=train_time / 5, end_val_epoch=np.mean(end_val_epochs), seeds=args.random_seed)\n            print('Done!')\n"
  },
  {
    "path": "ts_classification_methods/test/uea_test.py",
    "content": "import numpy as np\nimport torch\n\nfrom data.preprocessing import fill_nan_value, normalize_uea_set\nfrom data.preprocessing import load_UEA\n\nuea_all = ['ArticularyWordRecognition', 'AtrialFibrillation', 'BasicMotions', 'CharacterTrajectories',\n           'Cricket', 'DuckDuckGeese', 'EigenWorms', 'Epilepsy', 'EthanolConcentration', 'ERing',\n           'FaceDetection', 'FingerMovements', 'HandMovementDirection', 'Handwriting',\n           'Heartbeat', 'InsectWingbeat', 'JapaneseVowels', 'Libras', 'LSST', 'MotorImagery',\n           'NATOPS', 'PenDigits', 'PEMS-SF', 'PhonemeSpectra', 'RacketSports', 'SelfRegulationSCP1',\n           'SelfRegulationSCP2', 'SpokenArabicDigits', 'StandWalkJump', 'UWaveGestureLibrary']\n\nuea_all = ['FaceDetection']\n\ndataroot = '/SSD/lz/Multivariate2018_arff'\ni = 0\nfor dataset_name in uea_all:\n\n    sum_dataset, sum_target, num_classes = load_UEA(dataroot,\n                                                    dataset_name)  ## (num_size, series_length, num_dimensions)\n\n    series_length = []\n    for t_data in sum_dataset:\n        series_length, num_dimensions = t_data.shape\n        print(series_length, num_dimensions)\n    new_torch_sum = torch.tensor(sum_dataset).permute(0, 2, 1)\n    print(\"i = \", i, \", dataset_name = \", dataset_name, \", shape = \", sum_dataset.shape, new_torch_sum.shape,\n          num_classes)\n\n    if np.isnan(sum_dataset).any():\n        print(\"There has nan!!!!!!!!!!\")\n        sum_dataset, _, _ = fill_nan_value(sum_dataset, sum_dataset, sum_dataset)\n        sum_dataset = normalize_uea_set(sum_dataset)\n        if np.isnan(sum_dataset).any():\n            print(\"Still has nan!!!\")\n        else:\n            print(\"Mean imputation success!!!\")\n    i += 1\n"
  },
  {
    "path": "ts_classification_methods/timesnet/__init__.py",
    "content": ""
  },
  {
    "path": "ts_classification_methods/timesnet/main_timesnet.py",
    "content": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\n\nimport argparse\nimport time\n\nimport numpy as np\nimport torch\n\nfrom torch.utils.data import DataLoader\n\nfrom gpt4ts.gpt4ts_utils import load_UEA, normalize_uea_set, UEADataset, save_cls_new_result, set_seed, fill_nan_value, get_all_datasets, build_loss\n\nfrom timesnet.models.TimesNet import Model\n\n\n\ndef collate_fn(data, device, max_len=None):\n    \"\"\"Build mini-batch tensors from a list of (X, mask) tuples. Mask input. Create\n    Args:\n        data: len(batch_size) list of tuples (X, y).\n            - X: torch tensor of shape (seq_length, feat_dim); variable seq_length.\n            - y: torch tensor of shape (num_labels,) : class indices or numerical targets\n                (for classification or regression, respectively). num_labels > 1 for multi-task models\n        max_len: global fixed sequence length. Used for architectures requiring fixed length input,\n            where the batch length cannot vary dynamically. Longer sequences are clipped, shorter are padded with 0s\n    Returns:\n        X: (batch_size, padded_length, feat_dim) torch tensor of masked features (input)\n        targets: (batch_size, padded_length, feat_dim) torch tensor of unmasked features (output)\n        target_masks: (batch_size, padded_length, feat_dim) boolean torch tensor\n            0 indicates masked values to be predicted, 1 indicates unaffected/\"active\" feature values\n        padding_masks: (batch_size, padded_length) boolean tensor, 1 means keep vector at this position, 0 means padding\n    \"\"\"\n\n    batch_size = len(data)\n    features, labels = zip(*data)\n\n    # Stack and pad features and masks (convert 2D to 3D tensors, i.e. 
add batch dimension)\n    lengths = [X.shape[0] for X in features]  # original sequence length for each time series\n    if max_len is None:\n        max_len = max(lengths)\n\n    X = torch.zeros(batch_size, max_len, features[0].shape[-1])  # (batch_size, padded_length, feat_dim)\n    for i in range(batch_size):\n        end = min(lengths[i], max_len)\n        X[i, :end, :] = features[i][:end, :]\n\n    targets = torch.stack(labels, dim=0)  # (batch_size, num_labels)\n\n    padding_masks = padding_mask(torch.tensor(lengths, dtype=torch.int16),\n                                 max_len=max_len)  # (batch_size, padded_length) boolean tensor, \"1\" means keep\n\n    return X.to(device), targets.to(device), padding_masks.to(device)\n\n\ndef padding_mask(lengths, max_len=None):\n    \"\"\"\n    Used to mask padded positions: creates a (batch_size, max_len) boolean mask from a tensor of sequence lengths,\n    where 1 means keep element at this position (time step)\n    \"\"\"\n    batch_size = lengths.numel()\n    max_len = max_len or lengths.max().item()  # trick works because of overloading of 'or' operator for non-boolean types\n    return (torch.arange(0, max_len, device=lengths.device)\n            .type_as(lengths)\n            .repeat(batch_size, 1)\n            .lt(lengths.unsqueeze(1)))\n\n\n\ndef evaluate_gpt4ts(args, val_loader, model, loss):\n    val_loss = 0\n    val_accu = 0\n\n    sum_len = 0\n    for data, target, padding_x_mask in val_loader:\n        '''\n        data, target = data.to(device), target.to(device)\n        target = target.to(torch.int64)\n        '''\n        with torch.no_grad():\n            val_pred = model(data, padding_x_mask)\n            val_loss += loss(val_pred, target).item()\n            val_accu += torch.sum(torch.argmax(val_pred.data, axis=1) == target)\n            sum_len += len(target)\n\n    return val_loss / sum_len, val_accu / sum_len\n\n\nif __name__ == '__main__':  ##\n    parser = argparse.ArgumentParser()\n\n    # Base 
setup\n    parser.add_argument('--backbone', type=str, default='fcn', help='encoder backbone, fcn')\n    parser.add_argument('--random_seed', type=int, default=42, help='shuffle seed')\n    # UEA, TimesNet: ['EigenWorms', 'LSST', 'StandWalkJump']\n    # Dataset setup\n    parser.add_argument('--dataset', type=str, default='StandWalkJump',\n                        help='dataset(in ucr)')  # LSST Heartbeat Images  SelfRegulationSCP2\n    # parser.add_argument('--dataroot', type=str, default='../UCRArchive_2018', help='path of UCR folder')\n    # parser.add_argument('--dataroot', type=str, default='/dev_data/lz/time_series_pretrain/datasets/UCRArchive_2018',\n    #                     help='path of UCR folder')\n    # parser.add_argument('--dataroot', type=str, default='/SSD/lz/UCRArchive_2018', help='path of UCR folder')\n    parser.add_argument('--dataroot', type=str, default='/SSD/lz/Multivariate2018_arff', help='path of UEA folder')\n    parser.add_argument('--num_classes', type=int, default=0, help='number of class')\n    parser.add_argument('--normalize_way', type=str, default='single', help='single or train_set')\n    # parser.add_argument('--seq_len', type=int, default=46, help='seq_len')\n    parser.add_argument('--input_size', type=int, default=1, help='input_size')\n\n    # parser.add_argument('--patch_size', type=int, default=8, help='patch_size')\n    # parser.add_argument('--stride', type=int, default=8, help='stride')\n\n    parser.add_argument('--target_points', type=int, default=96, help='forecast horizon')\n\n    # Patch\n    parser.add_argument('--patch_len', type=int, default=8, help='patch length')\n    parser.add_argument('--stride', type=int, default=8, help='stride between patch')\n\n    # # RevIN\n    # parser.add_argument('--revin', type=int, default=1, help='reversible instance normalization')\n    # # Model args\n    # parser.add_argument('--n_layers', type=int, default=3, help='number of Transformer layers')\n    # 
parser.add_argument('--n_heads', type=int, default=16, help='number of Transformer heads')\n    # # parser.add_argument('--d_model', type=int, default=128, help='Transformer d_model')\n    # parser.add_argument('--d_ff', type=int, default=256, help='Tranformer MLP dimension')\n    # parser.add_argument('--dropout', type=float, default=0.2, help='Transformer dropout')\n    # parser.add_argument('--head_dropout', type=float, default=0, help='head dropout')\n\n    # Semi training\n    parser.add_argument('--labeled_ratio', type=float, default='0.1', help='0.1, 0.2, 0.4')\n\n    # basic config\n    parser.add_argument('--task_name', type=str, required=False, default='classification',\n                        help='task name, options:[long_term_forecast, short_term_forecast, imputation, classification, anomaly_detection]')\n    parser.add_argument('--freq', type=str, default='h',\n                        help='freq for time features encoding, options:[s:secondly, t:minutely, h:hourly, d:daily, b:business days, w:weekly, m:monthly], you can also use more detailed freq like 15min or 3h')\n\n    # forecasting task\n    parser.add_argument('--seq_len', type=int, default=96, help='input sequence length')\n    parser.add_argument('--label_len', type=int, default=48, help='start token length')\n    parser.add_argument('--pred_len', type=int, default=0, help='prediction sequence length')\n    parser.add_argument('--seasonal_patterns', type=str, default='Monthly', help='subset for M4')\n    parser.add_argument('--inverse', action='store_true', help='inverse output data', default=False)\n\n    # model define\n    parser.add_argument('--top_k', type=int, default=3, help='for TimesBlock')\n    parser.add_argument('--num_kernels', type=int, default=6, help='for Inception')\n    parser.add_argument('--enc_in', type=int, default=7, help='encoder input size')\n    parser.add_argument('--dec_in', type=int, default=7, help='decoder input size')\n    parser.add_argument('--c_out', 
type=int, default=7, help='output size')\n    parser.add_argument('--d_model', type=int, default=64, help='dimension of model')   ###\n    parser.add_argument('--n_heads', type=int, default=8, help='num of heads')\n    parser.add_argument('--e_layers', type=int, default=3, help='num of encoder layers')\n    parser.add_argument('--d_layers', type=int, default=1, help='num of decoder layers')\n    parser.add_argument('--d_ff', type=int, default=64, help='dimension of fcn')\n    parser.add_argument('--moving_avg', type=int, default=25, help='window size of moving average')\n    parser.add_argument('--factor', type=int, default=1, help='attn factor')\n    parser.add_argument('--distil', action='store_false',\n                        help='whether to use distilling in encoder, using this argument means not using distilling',\n                        default=True)\n    parser.add_argument('--dropout', type=float, default=0.1, help='dropout')\n    parser.add_argument('--embed', type=str, default='timeF',\n                        help='time features encoding, options:[timeF, fixed, learned]')\n    parser.add_argument('--activation', type=str, default='gelu', help='activation')\n    parser.add_argument('--output_attention', action='store_true', help='whether to output attention in ecoder')\n\n    # GPU\n    parser.add_argument('--use_gpu', type=bool, default=True, help='use gpu')\n    parser.add_argument('--gpu', type=int, default=1, help='gpu')\n\n\n    # training setup\n    parser.add_argument('--loss', type=str, default='cross_entropy', help='loss function')\n    parser.add_argument('--optimizer', type=str, default='adam', help='optimizer')\n    parser.add_argument('--lr', type=float, default=0.001, help='learning rate')\n    parser.add_argument('--weight_decay', type=float, default=0.0, help='weight decay')\n    parser.add_argument('--batch_size', type=int, default=8, help='')\n    parser.add_argument('--epoch', type=int, default=10, help='training epoch')\n    
parser.add_argument('--cuda', type=str, default='cuda:1')\n\n    parser.add_argument('--save_dir', type=str, default='/SSD/lz/time_series_label_noise/result')\n    parser.add_argument('--save_csv_name', type=str, default='timesnet_UEA_supervised_0731_')\n\n    # classifier setup\n    parser.add_argument('--classifier', type=str, default='linear', help='type of classifier(linear or nonlinear)')\n    parser.add_argument('--classifier_input', type=int, default=128, help='input dim of the classifiers')\n\n    args = parser.parse_args()\n\n    device = torch.device(args.cuda if torch.cuda.is_available() else \"cpu\")\n    set_seed(args)\n\n    # sum_dataset, sum_target, num_classes = build_dataset(args)\n    sum_dataset, sum_target, num_classes = load_UEA(args.dataroot, args.dataset)\n    # args.num_classes = num_classes\n    # args.seq_len = sum_dataset.shape[1]\n\n    args.num_classes = num_classes\n    args.seq_len = sum_dataset.shape[1]\n    args.input_size = sum_dataset.shape[2]\n\n    args.enc_in = sum_dataset.shape[2]\n\n    # # get number of patches\n    # num_patch = (max(args.seq_len, args.patch_len) - args.patch_len) // args.stride + 1\n    # print('number of patches:', num_patch)\n\n    while sum_dataset.shape[0] * 0.6 < args.batch_size:\n        args.batch_size = args.batch_size // 2\n\n    print(\"args.batch_size = \", args.batch_size, \", sum_dataset.shape = \", sum_dataset.shape)\n\n    # get model\n    model = Model(configs=args)\n\n\n    # model = gpt4ts(max_seq_len=args.seq_len, num_classes=args.num_classes, var_len=args.input_size, patch_size=args.patch_size, stride=args.stride)\n    model = model.to(device)\n\n    # model, classifier = build_model(args)\n    # model, classifier = model.to(device), classifier.to(device)\n    loss = build_loss(args).to(device)\n\n    model_init_state = model.state_dict()\n    # classifier_init_state = classifier.state_dict()\n\n    if args.optimizer == 'adam':\n        optimizer = torch.optim.Adam([{'params': 
model.parameters()}],\n                                     lr=args.lr, weight_decay=args.weight_decay)\n    elif args.optimizer == 'sgd':\n        optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)\n\n    train_datasets, train_targets, val_datasets, val_targets, test_datasets, test_targets = get_all_datasets(\n        sum_dataset, sum_target)\n\n    losses = []\n    test_accuracies = []\n    train_time = 0.0\n    end_val_epochs = []\n\n    for i, train_dataset in enumerate(train_datasets):\n        t = time.time()\n        model.load_state_dict(model_init_state)\n        # classifier.load_state_dict(classifier_init_state)\n        print('{} fold start training and evaluate'.format(i))\n\n        train_target = train_targets[i]\n        val_dataset = val_datasets[i]\n        val_target = val_targets[i]\n\n        test_dataset = test_datasets[i]\n        test_target = test_targets[i]\n\n        train_dataset, val_dataset, test_dataset = fill_nan_value(train_dataset, val_dataset, test_dataset)\n\n        if args.normalize_way == 'single':\n            # TODO normalize per series\n            train_dataset = normalize_uea_set(train_dataset)\n            val_dataset = normalize_uea_set(val_dataset)\n            test_dataset = normalize_uea_set(test_dataset)\n        # else:\n        #     train_dataset, val_dataset, test_dataset = normalize_train_val_test(train_dataset, val_dataset,\n        #                                                                         test_dataset)\n\n        train_set = UEADataset(torch.from_numpy(train_dataset).type(torch.FloatTensor).to(device).permute(0,2,1),\n                               torch.from_numpy(train_target).type(torch.FloatTensor).to(device).to(torch.int64))\n        val_set = UEADataset(torch.from_numpy(val_dataset).type(torch.FloatTensor).to(device).permute(0,2,1),\n                             torch.from_numpy(val_target).type(torch.FloatTensor).to(device).to(torch.int64))\n     
   test_set = UEADataset(torch.from_numpy(test_dataset).type(torch.FloatTensor).to(device).permute(0,2,1),\n                              torch.from_numpy(test_target).type(torch.FloatTensor).to(device).to(torch.int64))\n\n        # train_set = train_set.permute(0,2,1)\n\n        train_loader = DataLoader(train_set, batch_size=args.batch_size, num_workers=0, drop_last=True, collate_fn=lambda x: collate_fn(x, device, max_len=args.seq_len))\n        val_loader = DataLoader(val_set, batch_size=args.batch_size, num_workers=0, collate_fn=lambda x: collate_fn(x, device, max_len=args.seq_len))\n        test_loader = DataLoader(test_set, batch_size=args.batch_size, num_workers=0, collate_fn=lambda x: collate_fn(x, device, max_len=args.seq_len))\n\n        train_loss = []\n        train_accuracy = []\n        num_steps = args.epoch // args.batch_size\n\n        last_loss = float('inf')\n        stop_count = 0\n        increase_count = 0\n\n        num_steps = train_set.__len__() // args.batch_size\n\n        min_val_loss = float('inf')\n        test_accuracy = 0\n        end_val_epoch = 0\n\n        for epoch in range(args.epoch):\n\n            if stop_count == 50 or increase_count == 50:\n                print('model convergent at epoch {}, early stopping'.format(epoch))\n                break\n\n            epoch_train_loss = 0\n            epoch_train_acc = 0\n            num_iterations = 0\n\n            model.train()\n            train_embed = []\n\n            for x, y, padding_x_mask in train_loader:\n                optimizer.zero_grad()\n                # print(\"raw x.shape = \", x.shape)\n                # xb, num_patch = create_patch(xb=x.permute(0,2,1), patch_len=args.patch_len, stride=args.stride)\n                # print(\"x padding_x_mask.shape = \", x.shape, padding_x_mask.shape, padding_x_mask[0][:10])\n\n                # print(\"x.shape = \", x.shape, \", padding_x_mask.shape = \", padding_x_mask.shape)\n                pred = model(x, padding_x_mask)\n 
               step_loss = loss(pred, y)\n\n                # step_loss.backward(retain_graph=True)\n                step_loss.backward()\n                optimizer.step()\n\n                epoch_train_loss += step_loss.item()\n                epoch_train_acc += torch.sum(torch.argmax(pred.data, axis=1) == y) / len(y)\n\n                num_iterations += 1\n\n            epoch_train_loss /= num_steps\n            epoch_train_acc /= num_steps\n            # train_embed = np.concatenate(train_embed)\n\n            model.eval()\n\n            val_loss, val_accu = evaluate_gpt4ts(args, val_loader, model, loss)\n            if min_val_loss > val_loss:\n                min_val_loss = val_loss\n                end_val_epoch = epoch\n                test_loss, test_accuracy = evaluate_gpt4ts(args, test_loader, model, loss)\n\n            if abs(last_loss - val_loss) <= 1e-4:\n                stop_count += 1\n            else:\n                stop_count = 0\n\n            if val_loss > last_loss:\n                increase_count += 1\n            else:\n                increase_count = 0\n\n            last_loss = val_loss\n\n            if epoch % 50 == 0:\n                print(\n                    \"epoch : {}, train loss: {} , train accuracy : {}, \\ntest_accuracy : {}\".format(\n                        epoch, epoch_train_loss, epoch_train_acc, test_accuracy))\n\n        test_accuracies.append(test_accuracy)\n        end_val_epochs.append(end_val_epoch)\n        t = time.time() - t\n        train_time += t\n\n        print('{} fold finish training'.format(i))\n\n    test_accuracies = torch.Tensor(test_accuracies)\n\n    print(\"Training end: mean_test_acc = \", round(torch.mean(test_accuracies).item(), 4),\n          \"traning time (seconds) = \",\n          round(train_time, 4), \", seed = \", args.random_seed)\n\n    test_accuracies = test_accuracies.cpu().numpy()\n\n    save_cls_new_result(args, np.mean(test_accuracies), np.max(test_accuracies), 
np.min(test_accuracies),\n                        np.std(test_accuracies), train_time)\n\n    print('Done!')\n"
  },
  {
    "path": "ts_classification_methods/timesnet/main_timesnet_ucr.py",
    "content": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\n\nimport argparse\nimport time\n\nimport numpy as np\nimport torch\n\nfrom torch.utils.data import DataLoader\n\nfrom gpt4ts.gpt4ts_utils import load_UEA, normalize_uea_set, UEADataset, save_cls_new_result, set_seed, fill_nan_value, get_all_datasets, build_loss, build_dataset\n\nfrom timesnet.models.TimesNet import Model\n\n\n\ndef collate_fn(data, device, max_len=None):\n    \"\"\"Build mini-batch tensors from a list of (X, mask) tuples. Mask input. Create\n    Args:\n        data: len(batch_size) list of tuples (X, y).\n            - X: torch tensor of shape (seq_length, feat_dim); variable seq_length.\n            - y: torch tensor of shape (num_labels,) : class indices or numerical targets\n                (for classification or regression, respectively). num_labels > 1 for multi-task models\n        max_len: global fixed sequence length. Used for architectures requiring fixed length input,\n            where the batch length cannot vary dynamically. Longer sequences are clipped, shorter are padded with 0s\n    Returns:\n        X: (batch_size, padded_length, feat_dim) torch tensor of masked features (input)\n        targets: (batch_size, padded_length, feat_dim) torch tensor of unmasked features (output)\n        target_masks: (batch_size, padded_length, feat_dim) boolean torch tensor\n            0 indicates masked values to be predicted, 1 indicates unaffected/\"active\" feature values\n        padding_masks: (batch_size, padded_length) boolean tensor, 1 means keep vector at this position, 0 means padding\n    \"\"\"\n\n    batch_size = len(data)\n    features, labels = zip(*data)\n\n    # Stack and pad features and masks (convert 2D to 3D tensors, i.e. 
add batch dimension)\n    lengths = [X.shape[0] for X in features]  # original sequence length for each time series\n    if max_len is None:\n        max_len = max(lengths)\n\n    X = torch.zeros(batch_size, max_len, features[0].shape[-1])  # (batch_size, padded_length, feat_dim)\n    for i in range(batch_size):\n        end = min(lengths[i], max_len)\n        X[i, :end, :] = features[i][:end, :]\n\n    targets = torch.stack(labels, dim=0)  # (batch_size, num_labels)\n\n    padding_masks = padding_mask(torch.tensor(lengths, dtype=torch.int16),\n                                 max_len=max_len)  # (batch_size, padded_length) boolean tensor, \"1\" means keep\n\n    return X.to(device), targets.to(device), padding_masks.to(device)\n\n\ndef padding_mask(lengths, max_len=None):\n    \"\"\"\n    Used to mask padded positions: creates a (batch_size, max_len) boolean mask from a tensor of sequence lengths,\n    where 1 means keep element at this position (time step)\n    \"\"\"\n    batch_size = lengths.numel()\n    max_len = max_len or lengths.max().item()  # trick works because of overloading of 'or' operator for non-boolean types\n    return (torch.arange(0, max_len, device=lengths.device)\n            .type_as(lengths)\n            .repeat(batch_size, 1)\n            .lt(lengths.unsqueeze(1)))\n\n\n\ndef evaluate_gpt4ts(args, val_loader, model, loss):\n    val_loss = 0\n    val_accu = 0\n\n    sum_len = 0\n    for data, target, padding_x_mask in val_loader:\n        '''\n        data, target = data.to(device), target.to(device)\n        target = target.to(torch.int64)\n        '''\n        with torch.no_grad():\n            val_pred = model(data, padding_x_mask)\n            val_loss += loss(val_pred, target).item()\n            val_accu += torch.sum(torch.argmax(val_pred.data, axis=1) == target)\n            sum_len += len(target)\n\n    return val_loss / sum_len, val_accu / sum_len\n\n\nif __name__ == '__main__':  ##\n    parser = argparse.ArgumentParser()\n\n    # UCR, 
TimesNet: ['HandOutlines', 'InlineSkate', 'StarLightCurves']\n    # UEA, TimesNet: ['EigenWorms', 'LSST', 'StandWalkJump']\n\n    # Base setup\n    parser.add_argument('--backbone', type=str, default='fcn', help='encoder backbone, fcn')\n    parser.add_argument('--random_seed', type=int, default=42, help='shuffle seed')\n\n    # Dataset setup\n    parser.add_argument('--dataset', type=str, default='StarLightCurves',\n                        help='dataset(in ucr)')  # LSST Heartbeat Images  SelfRegulationSCP2\n    # parser.add_argument('--dataroot', type=str, default='../UCRArchive_2018', help='path of UCR folder')\n    # parser.add_argument('--dataroot', type=str, default='/dev_data/lz/time_series_pretrain/datasets/UCRArchive_2018',\n    #                     help='path of UCR folder')\n    # parser.add_argument('--dataroot', type=str, default='/SSD/lz/UCRArchive_2018', help='path of UCR folder')\n    parser.add_argument('--dataroot', type=str, default='/SSD/lz/UCRArchive_2018', help='path of UEA folder')\n    parser.add_argument('--num_classes', type=int, default=0, help='number of class')\n    parser.add_argument('--normalize_way', type=str, default='single', help='single or train_set')\n    # parser.add_argument('--seq_len', type=int, default=46, help='seq_len')\n    parser.add_argument('--input_size', type=int, default=1, help='input_size')\n\n    # parser.add_argument('--patch_size', type=int, default=8, help='patch_size')\n    # parser.add_argument('--stride', type=int, default=8, help='stride')\n\n    parser.add_argument('--target_points', type=int, default=96, help='forecast horizon')\n\n    # Patch\n    parser.add_argument('--patch_len', type=int, default=8, help='patch length')\n    parser.add_argument('--stride', type=int, default=8, help='stride between patch')\n\n    # # RevIN\n    # parser.add_argument('--revin', type=int, default=1, help='reversible instance normalization')\n    # # Model args\n    # parser.add_argument('--n_layers', type=int, 
default=3, help='number of Transformer layers')\n    # parser.add_argument('--n_heads', type=int, default=16, help='number of Transformer heads')\n    # # parser.add_argument('--d_model', type=int, default=128, help='Transformer d_model')\n    # parser.add_argument('--d_ff', type=int, default=256, help='Tranformer MLP dimension')\n    # parser.add_argument('--dropout', type=float, default=0.2, help='Transformer dropout')\n    # parser.add_argument('--head_dropout', type=float, default=0, help='head dropout')\n\n    # Semi training\n    parser.add_argument('--labeled_ratio', type=float, default='0.1', help='0.1, 0.2, 0.4')\n\n    # basic config\n    parser.add_argument('--task_name', type=str, required=False, default='classification',\n                        help='task name, options:[long_term_forecast, short_term_forecast, imputation, classification, anomaly_detection]')\n    parser.add_argument('--freq', type=str, default='h',\n                        help='freq for time features encoding, options:[s:secondly, t:minutely, h:hourly, d:daily, b:business days, w:weekly, m:monthly], you can also use more detailed freq like 15min or 3h')\n\n    # forecasting task\n    parser.add_argument('--seq_len', type=int, default=96, help='input sequence length')\n    parser.add_argument('--label_len', type=int, default=48, help='start token length')\n    parser.add_argument('--pred_len', type=int, default=0, help='prediction sequence length')\n    parser.add_argument('--seasonal_patterns', type=str, default='Monthly', help='subset for M4')\n    parser.add_argument('--inverse', action='store_true', help='inverse output data', default=False)\n\n    # model define\n    parser.add_argument('--top_k', type=int, default=3, help='for TimesBlock')\n    parser.add_argument('--num_kernels', type=int, default=6, help='for Inception')\n    parser.add_argument('--enc_in', type=int, default=7, help='encoder input size')\n    parser.add_argument('--dec_in', type=int, default=7, help='decoder 
input size')\n    parser.add_argument('--c_out', type=int, default=7, help='output size')\n    parser.add_argument('--d_model', type=int, default=64, help='dimension of model')   ###\n    parser.add_argument('--n_heads', type=int, default=8, help='num of heads')\n    parser.add_argument('--e_layers', type=int, default=3, help='num of encoder layers')\n    parser.add_argument('--d_layers', type=int, default=1, help='num of decoder layers')\n    parser.add_argument('--d_ff', type=int, default=64, help='dimension of fcn')\n    parser.add_argument('--moving_avg', type=int, default=25, help='window size of moving average')\n    parser.add_argument('--factor', type=int, default=1, help='attn factor')\n    parser.add_argument('--distil', action='store_false',\n                        help='whether to use distilling in encoder, using this argument means not using distilling',\n                        default=True)\n    parser.add_argument('--dropout', type=float, default=0.1, help='dropout')\n    parser.add_argument('--embed', type=str, default='timeF',\n                        help='time features encoding, options:[timeF, fixed, learned]')\n    parser.add_argument('--activation', type=str, default='gelu', help='activation')\n    parser.add_argument('--output_attention', action='store_true', help='whether to output attention in ecoder')\n\n    # GPU\n    parser.add_argument('--use_gpu', type=bool, default=True, help='use gpu')\n    parser.add_argument('--gpu', type=int, default=1, help='gpu')\n\n\n    # training setup\n    parser.add_argument('--loss', type=str, default='cross_entropy', help='loss function')\n    parser.add_argument('--optimizer', type=str, default='adam', help='optimizer')\n    parser.add_argument('--lr', type=float, default=0.001, help='learning rate')\n    parser.add_argument('--weight_decay', type=float, default=0.0, help='weight decay')\n    parser.add_argument('--batch_size', type=int, default=8, help='')\n    parser.add_argument('--epoch', type=int, 
default=50, help='training epoch')\n    parser.add_argument('--cuda', type=str, default='cuda:1')\n\n    parser.add_argument('--save_dir', type=str, default='/SSD/lz/time_series_label_noise/result')\n    parser.add_argument('--save_csv_name', type=str, default='timesnet_ucr_supervised_0801_')\n\n    # classifier setup\n    parser.add_argument('--classifier', type=str, default='linear', help='type of classifier(linear or nonlinear)')\n    parser.add_argument('--classifier_input', type=int, default=128, help='input dim of the classifiers')\n\n    args = parser.parse_args()\n\n    device = torch.device(args.cuda if torch.cuda.is_available() else \"cpu\")\n    set_seed(args)\n\n    sum_dataset, sum_target, num_classes = build_dataset(args)\n    # sum_dataset, sum_target, num_classes = load_UEA(args.dataroot, args.dataset)\n    # args.num_classes = num_classes\n    # args.seq_len = sum_dataset.shape[1]\n\n    sum_dataset = sum_dataset[:, :, np.newaxis]\n\n    args.num_classes = num_classes\n    args.seq_len = sum_dataset.shape[1]\n    args.input_size = sum_dataset.shape[2]\n\n    args.enc_in = sum_dataset.shape[2]\n\n    # # get number of patches\n    # num_patch = (max(args.seq_len, args.patch_len) - args.patch_len) // args.stride + 1\n    # print('number of patches:', num_patch)\n\n    while sum_dataset.shape[0] * 0.6 < args.batch_size:\n        args.batch_size = args.batch_size // 2\n\n    print(\"args.batch_size = \", args.batch_size, \", sum_dataset.shape = \", sum_dataset.shape)\n\n    # get model\n    model = Model(configs=args)\n\n\n    # model = gpt4ts(max_seq_len=args.seq_len, num_classes=args.num_classes, var_len=args.input_size, patch_size=args.patch_size, stride=args.stride)\n    model = model.to(device)\n\n    # model, classifier = build_model(args)\n    # model, classifier = model.to(device), classifier.to(device)\n    loss = build_loss(args).to(device)\n\n    model_init_state = model.state_dict()\n    # classifier_init_state = classifier.state_dict()\n\n 
   if args.optimizer == 'adam':\n        optimizer = torch.optim.Adam([{'params': model.parameters()}],\n                                     lr=args.lr, weight_decay=args.weight_decay)\n    elif args.optimizer == 'sgd':\n        optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)\n\n    train_datasets, train_targets, val_datasets, val_targets, test_datasets, test_targets = get_all_datasets(\n        sum_dataset, sum_target)\n\n    losses = []\n    test_accuracies = []\n    train_time = 0.0\n    end_val_epochs = []\n\n    for i, train_dataset in enumerate(train_datasets):\n        t = time.time()\n        model.load_state_dict(model_init_state)\n        # classifier.load_state_dict(classifier_init_state)\n        print('{} fold start training and evaluate'.format(i))\n\n        train_target = train_targets[i]\n        val_dataset = val_datasets[i]\n        val_target = val_targets[i]\n\n        test_dataset = test_datasets[i]\n        test_target = test_targets[i]\n\n        train_dataset, val_dataset, test_dataset = fill_nan_value(train_dataset, val_dataset, test_dataset)\n\n        if args.normalize_way == 'single':\n            # TODO normalize per series\n            train_dataset = normalize_uea_set(train_dataset)\n            val_dataset = normalize_uea_set(val_dataset)\n            test_dataset = normalize_uea_set(test_dataset)\n        # else:\n        #     train_dataset, val_dataset, test_dataset = normalize_train_val_test(train_dataset, val_dataset,\n        #                                                                         test_dataset)\n\n        train_set = UEADataset(torch.from_numpy(train_dataset).type(torch.FloatTensor).to(device).permute(0,2,1),\n                               torch.from_numpy(train_target).type(torch.FloatTensor).to(device).to(torch.int64))\n        val_set = UEADataset(torch.from_numpy(val_dataset).type(torch.FloatTensor).to(device).permute(0,2,1),\n                             
torch.from_numpy(val_target).type(torch.FloatTensor).to(device).to(torch.int64))\n        test_set = UEADataset(torch.from_numpy(test_dataset).type(torch.FloatTensor).to(device).permute(0,2,1),\n                              torch.from_numpy(test_target).type(torch.FloatTensor).to(device).to(torch.int64))\n\n        # train_set = train_set.permute(0,2,1)\n\n        train_loader = DataLoader(train_set, batch_size=args.batch_size, num_workers=0, drop_last=True, collate_fn=lambda x: collate_fn(x, device, max_len=args.seq_len))\n        val_loader = DataLoader(val_set, batch_size=args.batch_size, num_workers=0, collate_fn=lambda x: collate_fn(x, device, max_len=args.seq_len))\n        test_loader = DataLoader(test_set, batch_size=args.batch_size, num_workers=0, collate_fn=lambda x: collate_fn(x, device, max_len=args.seq_len))\n\n        train_loss = []\n        train_accuracy = []\n        num_steps = args.epoch // args.batch_size\n\n        last_loss = float('inf')\n        stop_count = 0\n        increase_count = 0\n\n        num_steps = train_set.__len__() // args.batch_size\n\n        min_val_loss = float('inf')\n        test_accuracy = 0\n        end_val_epoch = 0\n\n        for epoch in range(args.epoch):\n\n            if stop_count == 80 or increase_count == 80:\n                print('model convergent at epoch {}, early stopping'.format(epoch))\n                break\n\n            epoch_train_loss = 0\n            epoch_train_acc = 0\n            num_iterations = 0\n\n            model.train()\n            train_embed = []\n\n            for x, y, padding_x_mask in train_loader:\n                optimizer.zero_grad()\n                # print(\"raw x.shape = \", x.shape)\n                # xb, num_patch = create_patch(xb=x.permute(0,2,1), patch_len=args.patch_len, stride=args.stride)\n                # print(\"x padding_x_mask.shape = \", x.shape, padding_x_mask.shape, padding_x_mask[0][:10])\n\n                pred = model(x, padding_x_mask)\n                
step_loss = loss(pred, y)\n\n                # step_loss.backward(retain_graph=True)\n                step_loss.backward()\n                optimizer.step()\n\n                epoch_train_loss += step_loss.item()\n                epoch_train_acc += torch.sum(torch.argmax(pred.data, axis=1) == y) / len(y)\n\n                num_iterations += 1\n\n            epoch_train_loss /= num_steps\n            epoch_train_acc /= num_steps\n            # train_embed = np.concatenate(train_embed)\n\n            model.eval()\n\n            val_loss, val_accu = evaluate_gpt4ts(args, val_loader, model, loss)\n            if min_val_loss > val_loss:\n                min_val_loss = val_loss\n                end_val_epoch = epoch\n                test_loss, test_accuracy = evaluate_gpt4ts(args, test_loader, model, loss)\n\n            if abs(last_loss - val_loss) <= 1e-4:\n                stop_count += 1\n            else:\n                stop_count = 0\n\n            if val_loss > last_loss:\n                increase_count += 1\n            else:\n                increase_count = 0\n\n            last_loss = val_loss\n\n            if epoch % 50 == 0:\n                print(\n                    \"epoch : {}, train loss: {} , train accuracy : {}, \\ntest_accuracy : {}\".format(\n                        epoch, epoch_train_loss, epoch_train_acc, test_accuracy))\n\n        test_accuracies.append(test_accuracy)\n        end_val_epochs.append(end_val_epoch)\n        t = time.time() - t\n        train_time += t\n\n        print('{} fold finish training'.format(i))\n\n    test_accuracies = torch.Tensor(test_accuracies)\n\n    print(\"Training end: mean_test_acc = \", round(torch.mean(test_accuracies).item(), 4),\n          \"traning time (seconds) = \",\n          round(train_time, 4), \", seed = \", args.random_seed)\n\n    test_accuracies = test_accuracies.cpu().numpy()\n\n    save_cls_new_result(args, np.mean(test_accuracies), np.max(test_accuracies), np.min(test_accuracies),\n         
               np.std(test_accuracies), train_time)\n\n    print('Done!')\n"
  },
  {
    "path": "ts_classification_methods/timesnet/models/Conv_Blocks.py",
    "content": "import torch\nimport torch.nn as nn\n\n\nclass Inception_Block_V1(nn.Module):\n    def __init__(self, in_channels, out_channels, num_kernels=6, init_weight=True):\n        super(Inception_Block_V1, self).__init__()\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n        self.num_kernels = num_kernels\n        kernels = []\n        for i in range(self.num_kernels):\n            kernels.append(nn.Conv2d(in_channels, out_channels, kernel_size=2 * i + 1, padding=i))\n        self.kernels = nn.ModuleList(kernels)\n        if init_weight:\n            self._initialize_weights()\n\n    def _initialize_weights(self):\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n                if m.bias is not None:\n                    nn.init.constant_(m.bias, 0)\n\n    def forward(self, x):\n        res_list = []\n        for i in range(self.num_kernels):\n            res_list.append(self.kernels[i](x))\n        res = torch.stack(res_list, dim=-1).mean(-1)\n        return res\n\n\nclass Inception_Block_V2(nn.Module):\n    def __init__(self, in_channels, out_channels, num_kernels=6, init_weight=True):\n        super(Inception_Block_V2, self).__init__()\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n        self.num_kernels = num_kernels\n        kernels = []\n        for i in range(self.num_kernels // 2):\n            kernels.append(nn.Conv2d(in_channels, out_channels, kernel_size=[1, 2 * i + 3], padding=[0, i + 1]))\n            kernels.append(nn.Conv2d(in_channels, out_channels, kernel_size=[2 * i + 3, 1], padding=[i + 1, 0]))\n        kernels.append(nn.Conv2d(in_channels, out_channels, kernel_size=1))\n        self.kernels = nn.ModuleList(kernels)\n        if init_weight:\n            self._initialize_weights()\n\n    def _initialize_weights(self):\n        for m in 
self.modules():\n            if isinstance(m, nn.Conv2d):\n                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n                if m.bias is not None:\n                    nn.init.constant_(m.bias, 0)\n\n    def forward(self, x):\n        res_list = []\n        for i in range(self.num_kernels + 1):\n            res_list.append(self.kernels[i](x))\n        res = torch.stack(res_list, dim=-1).mean(-1)\n        return res\n"
  },
  {
    "path": "ts_classification_methods/timesnet/models/Embed.py",
    "content": "import torch\nimport torch.nn as nn\nimport math\n\n\nclass PositionalEmbedding(nn.Module):\n    def __init__(self, d_model, max_len=25000):\n        super(PositionalEmbedding, self).__init__()\n        # Compute the positional encodings once in log space.\n        pe = torch.zeros(max_len, d_model).float()\n        pe.require_grad = False\n\n        position = torch.arange(0, max_len).float().unsqueeze(1)\n        div_term = (torch.arange(0, d_model, 2).float()\n                    * -(math.log(10000.0) / d_model)).exp()\n\n        pe[:, 0::2] = torch.sin(position * div_term)\n        pe[:, 1::2] = torch.cos(position * div_term)\n\n        pe = pe.unsqueeze(0)\n        self.register_buffer('pe', pe)\n\n    def forward(self, x):\n        return self.pe[:, :x.size(1)]\n\n\nclass TokenEmbedding(nn.Module):\n    def __init__(self, c_in, d_model):\n        super(TokenEmbedding, self).__init__()\n        padding = 1 if torch.__version__ >= '1.5.0' else 2\n        self.tokenConv = nn.Conv1d(in_channels=c_in, out_channels=d_model,\n                                   kernel_size=3, padding=padding, padding_mode='circular', bias=False)\n        for m in self.modules():\n            if isinstance(m, nn.Conv1d):\n                nn.init.kaiming_normal_(\n                    m.weight, mode='fan_in', nonlinearity='leaky_relu')\n\n    def forward(self, x):\n        x = self.tokenConv(x.permute(0, 2, 1)).transpose(1, 2)\n        return x\n\n\nclass FixedEmbedding(nn.Module):\n    def __init__(self, c_in, d_model):\n        super(FixedEmbedding, self).__init__()\n\n        w = torch.zeros(c_in, d_model).float()\n        w.require_grad = False\n\n        position = torch.arange(0, c_in).float().unsqueeze(1)\n        div_term = (torch.arange(0, d_model, 2).float()\n                    * -(math.log(10000.0) / d_model)).exp()\n\n        w[:, 0::2] = torch.sin(position * div_term)\n        w[:, 1::2] = torch.cos(position * div_term)\n\n        self.emb = 
nn.Embedding(c_in, d_model)\n        self.emb.weight = nn.Parameter(w, requires_grad=False)\n\n    def forward(self, x):\n        return self.emb(x).detach()\n\n\nclass TemporalEmbedding(nn.Module):\n    def __init__(self, d_model, embed_type='fixed', freq='h'):\n        super(TemporalEmbedding, self).__init__()\n\n        minute_size = 4\n        hour_size = 24\n        weekday_size = 7\n        day_size = 32\n        month_size = 13\n\n        Embed = FixedEmbedding if embed_type == 'fixed' else nn.Embedding\n        if freq == 't':\n            self.minute_embed = Embed(minute_size, d_model)\n        self.hour_embed = Embed(hour_size, d_model)\n        self.weekday_embed = Embed(weekday_size, d_model)\n        self.day_embed = Embed(day_size, d_model)\n        self.month_embed = Embed(month_size, d_model)\n\n    def forward(self, x):\n        x = x.long()\n        minute_x = self.minute_embed(x[:, :, 4]) if hasattr(\n            self, 'minute_embed') else 0.\n        hour_x = self.hour_embed(x[:, :, 3])\n        weekday_x = self.weekday_embed(x[:, :, 2])\n        day_x = self.day_embed(x[:, :, 1])\n        month_x = self.month_embed(x[:, :, 0])\n\n        return hour_x + weekday_x + day_x + month_x + minute_x\n\n\nclass TimeFeatureEmbedding(nn.Module):\n    def __init__(self, d_model, embed_type='timeF', freq='h'):\n        super(TimeFeatureEmbedding, self).__init__()\n\n        freq_map = {'h': 4, 't': 5, 's': 6,\n                    'm': 1, 'a': 1, 'w': 2, 'd': 3, 'b': 3}\n        d_inp = freq_map[freq]\n        self.embed = nn.Linear(d_inp, d_model, bias=False)\n\n    def forward(self, x):\n        return self.embed(x)\n\n\nclass DataEmbedding(nn.Module):\n    def __init__(self, c_in, d_model, embed_type='fixed', freq='h', dropout=0.1):\n        super(DataEmbedding, self).__init__()\n\n        self.value_embedding = TokenEmbedding(c_in=c_in, d_model=d_model)\n        self.position_embedding = PositionalEmbedding(d_model=d_model)\n        
self.temporal_embedding = TemporalEmbedding(d_model=d_model, embed_type=embed_type,\n                                                    freq=freq) if embed_type != 'timeF' else TimeFeatureEmbedding(\n            d_model=d_model, embed_type=embed_type, freq=freq)\n        self.dropout = nn.Dropout(p=dropout)\n\n    def forward(self, x, x_mark):\n        if x_mark is None:\n            x = self.value_embedding(x) + self.position_embedding(x)\n        else:\n            x = self.value_embedding(\n                x) + self.temporal_embedding(x_mark) + self.position_embedding(x)\n        return self.dropout(x)\n\n\nclass DataEmbedding_inverted(nn.Module):\n    def __init__(self, c_in, d_model, embed_type='fixed', freq='h', dropout=0.1):\n        super(DataEmbedding_inverted, self).__init__()\n        self.value_embedding = nn.Linear(c_in, d_model)\n        self.dropout = nn.Dropout(p=dropout)\n\n    def forward(self, x, x_mark):\n        x = x.permute(0, 2, 1)\n        # x: [Batch Variate Time]\n        if x_mark is None:\n            x = self.value_embedding(x)\n        else:\n            x = self.value_embedding(torch.cat([x, x_mark.permute(0, 2, 1)], 1))\n        # x: [Batch Variate d_model]\n        return self.dropout(x)\n\n\nclass DataEmbedding_wo_pos(nn.Module):\n    def __init__(self, c_in, d_model, embed_type='fixed', freq='h', dropout=0.1):\n        super(DataEmbedding_wo_pos, self).__init__()\n\n        self.value_embedding = TokenEmbedding(c_in=c_in, d_model=d_model)\n        self.position_embedding = PositionalEmbedding(d_model=d_model)\n        self.temporal_embedding = TemporalEmbedding(d_model=d_model, embed_type=embed_type,\n                                                    freq=freq) if embed_type != 'timeF' else TimeFeatureEmbedding(\n            d_model=d_model, embed_type=embed_type, freq=freq)\n        self.dropout = nn.Dropout(p=dropout)\n\n    def forward(self, x, x_mark):\n        if x_mark is None:\n            x = self.value_embedding(x)\n  
      else:\n            x = self.value_embedding(x) + self.temporal_embedding(x_mark)\n        return self.dropout(x)\n\n\nclass PatchEmbedding(nn.Module):\n    def __init__(self, d_model, patch_len, stride, padding, dropout):\n        super(PatchEmbedding, self).__init__()\n        # Patching\n        self.patch_len = patch_len\n        self.stride = stride\n        self.padding_patch_layer = nn.ReplicationPad1d((0, padding))\n\n        # Backbone, Input encoding: projection of feature vectors onto a d-dim vector space\n        self.value_embedding = nn.Linear(patch_len, d_model, bias=False)\n\n        # Positional embedding\n        self.position_embedding = PositionalEmbedding(d_model)\n\n        # Residual dropout\n        self.dropout = nn.Dropout(dropout)\n\n    def forward(self, x):\n        # do patching\n        n_vars = x.shape[1]\n        x = self.padding_patch_layer(x)\n        x = x.unfold(dimension=-1, size=self.patch_len, step=self.stride)\n        x = torch.reshape(x, (x.shape[0] * x.shape[1], x.shape[2], x.shape[3]))\n        # Input encoding\n        x = self.value_embedding(x) + self.position_embedding(x)\n        return self.dropout(x), n_vars\n"
  },
  {
    "path": "ts_classification_methods/timesnet/models/SelfAttention_Family.py",
    "content": "import torch\nimport torch.nn as nn\nimport numpy as np\nfrom math import sqrt\nfrom einops import rearrange, repeat\n\n\nclass TriangularCausalMask():\n    def __init__(self, B, L, device=\"cpu\"):\n        mask_shape = [B, 1, L, L]\n        with torch.no_grad():\n            self._mask = torch.triu(torch.ones(mask_shape, dtype=torch.bool), diagonal=1).to(device)\n\n    @property\n    def mask(self):\n        return self._mask\n\n\nclass ProbMask():\n    def __init__(self, B, H, L, index, scores, device=\"cpu\"):\n        _mask = torch.ones(L, scores.shape[-1], dtype=torch.bool).to(device).triu(1)\n        _mask_ex = _mask[None, None, :].expand(B, H, L, scores.shape[-1])\n        indicator = _mask_ex[torch.arange(B)[:, None, None],\n                    torch.arange(H)[None, :, None],\n                    index, :].to(device)\n        self._mask = indicator.view(scores.shape).to(device)\n\n    @property\n    def mask(self):\n        return self._mask\n\n\n\nclass DSAttention(nn.Module):\n    '''De-stationary Attention'''\n\n    def __init__(self, mask_flag=True, factor=5, scale=None, attention_dropout=0.1, output_attention=False):\n        super(DSAttention, self).__init__()\n        self.scale = scale\n        self.mask_flag = mask_flag\n        self.output_attention = output_attention\n        self.dropout = nn.Dropout(attention_dropout)\n\n    def forward(self, queries, keys, values, attn_mask, tau=None, delta=None):\n        B, L, H, E = queries.shape\n        _, S, _, D = values.shape\n        scale = self.scale or 1. 
/ sqrt(E)\n\n        tau = 1.0 if tau is None else tau.unsqueeze(\n            1).unsqueeze(1)  # B x 1 x 1 x 1\n        delta = 0.0 if delta is None else delta.unsqueeze(\n            1).unsqueeze(1)  # B x 1 x 1 x S\n\n        # De-stationary Attention, rescaling pre-softmax score with learned de-stationary factors\n        scores = torch.einsum(\"blhe,bshe->bhls\", queries, keys) * tau + delta\n\n        if self.mask_flag:\n            if attn_mask is None:\n                attn_mask = TriangularCausalMask(B, L, device=queries.device)\n\n            scores.masked_fill_(attn_mask.mask, -np.inf)\n\n        A = self.dropout(torch.softmax(scale * scores, dim=-1))\n        V = torch.einsum(\"bhls,bshd->blhd\", A, values)\n\n        if self.output_attention:\n            return V.contiguous(), A\n        else:\n            return V.contiguous(), None\n\n\nclass FullAttention(nn.Module):\n    def __init__(self, mask_flag=True, factor=5, scale=None, attention_dropout=0.1, output_attention=False):\n        super(FullAttention, self).__init__()\n        self.scale = scale\n        self.mask_flag = mask_flag\n        self.output_attention = output_attention\n        self.dropout = nn.Dropout(attention_dropout)\n\n    def forward(self, queries, keys, values, attn_mask, tau=None, delta=None):\n        B, L, H, E = queries.shape\n        _, S, _, D = values.shape\n        scale = self.scale or 1. 
/ sqrt(E)\n\n        scores = torch.einsum(\"blhe,bshe->bhls\", queries, keys)\n\n        if self.mask_flag:\n            if attn_mask is None:\n                attn_mask = TriangularCausalMask(B, L, device=queries.device)\n\n            scores.masked_fill_(attn_mask.mask, -np.inf)\n\n        A = self.dropout(torch.softmax(scale * scores, dim=-1))\n        V = torch.einsum(\"bhls,bshd->blhd\", A, values)\n\n        if self.output_attention:\n            return V.contiguous(), A\n        else:\n            return V.contiguous(), None\n\n\nclass ProbAttention(nn.Module):\n    def __init__(self, mask_flag=True, factor=5, scale=None, attention_dropout=0.1, output_attention=False):\n        super(ProbAttention, self).__init__()\n        self.factor = factor\n        self.scale = scale\n        self.mask_flag = mask_flag\n        self.output_attention = output_attention\n        self.dropout = nn.Dropout(attention_dropout)\n\n    def _prob_QK(self, Q, K, sample_k, n_top):  # n_top: c*ln(L_q)\n        # Q [B, H, L, D]\n        B, H, L_K, E = K.shape\n        _, _, L_Q, _ = Q.shape\n\n        # calculate the sampled Q_K\n        K_expand = K.unsqueeze(-3).expand(B, H, L_Q, L_K, E)\n        # real U = U_part(factor*ln(L_k))*L_q\n        index_sample = torch.randint(L_K, (L_Q, sample_k))\n        K_sample = K_expand[:, :, torch.arange(\n            L_Q).unsqueeze(1), index_sample, :]\n        Q_K_sample = torch.matmul(\n            Q.unsqueeze(-2), K_sample.transpose(-2, -1)).squeeze()\n\n        # find the Top_k query with sparisty measurement\n        M = Q_K_sample.max(-1)[0] - torch.div(Q_K_sample.sum(-1), L_K)\n        M_top = M.topk(n_top, sorted=False)[1]\n\n        # use the reduced Q to calculate Q_K\n        Q_reduce = Q[torch.arange(B)[:, None, None],\n                   torch.arange(H)[None, :, None],\n                   M_top, :]  # factor*ln(L_q)\n        Q_K = torch.matmul(Q_reduce, K.transpose(-2, -1))  # factor*ln(L_q)*L_k\n\n        return Q_K, M_top\n\n    
def _get_initial_context(self, V, L_Q):\n        B, H, L_V, D = V.shape\n        if not self.mask_flag:\n            # V_sum = V.sum(dim=-2)\n            V_sum = V.mean(dim=-2)\n            contex = V_sum.unsqueeze(-2).expand(B, H,\n                                                L_Q, V_sum.shape[-1]).clone()\n        else:  # use mask\n            # requires that L_Q == L_V, i.e. for self-attention only\n            assert (L_Q == L_V)\n            contex = V.cumsum(dim=-2)\n        return contex\n\n    def _update_context(self, context_in, V, scores, index, L_Q, attn_mask):\n        B, H, L_V, D = V.shape\n\n        if self.mask_flag:\n            attn_mask = ProbMask(B, H, L_Q, index, scores, device=V.device)\n            scores.masked_fill_(attn_mask.mask, -np.inf)\n\n        attn = torch.softmax(scores, dim=-1)  # nn.Softmax(dim=-1)(scores)\n\n        context_in[torch.arange(B)[:, None, None],\n        torch.arange(H)[None, :, None],\n        index, :] = torch.matmul(attn, V).type_as(context_in)\n        if self.output_attention:\n            attns = (torch.ones([B, H, L_V, L_V]) /\n                     L_V).type_as(attn).to(attn.device)\n            attns[torch.arange(B)[:, None, None], torch.arange(H)[\n                                                  None, :, None], index, :] = attn\n            return context_in, attns\n        else:\n            return context_in, None\n\n    def forward(self, queries, keys, values, attn_mask, tau=None, delta=None):\n        B, L_Q, H, D = queries.shape\n        _, L_K, _, _ = keys.shape\n\n        queries = queries.transpose(2, 1)\n        keys = keys.transpose(2, 1)\n        values = values.transpose(2, 1)\n\n        U_part = self.factor * \\\n                 np.ceil(np.log(L_K)).astype('int').item()  # c*ln(L_k)\n        u = self.factor * \\\n            np.ceil(np.log(L_Q)).astype('int').item()  # c*ln(L_q)\n\n        U_part = U_part if U_part < L_K else L_K\n        u = u if u < L_Q else L_Q\n\n        scores_top, 
index = self._prob_QK(\n            queries, keys, sample_k=U_part, n_top=u)\n\n        # add scale factor\n        scale = self.scale or 1. / sqrt(D)\n        if scale is not None:\n            scores_top = scores_top * scale\n        # get the context\n        context = self._get_initial_context(values, L_Q)\n        # update the context with selected top_k queries\n        context, attn = self._update_context(\n            context, values, scores_top, index, L_Q, attn_mask)\n\n        return context.contiguous(), attn\n\n\nclass AttentionLayer(nn.Module):\n    def __init__(self, attention, d_model, n_heads, d_keys=None,\n                 d_values=None):\n        super(AttentionLayer, self).__init__()\n\n        d_keys = d_keys or (d_model // n_heads)\n        d_values = d_values or (d_model // n_heads)\n\n        self.inner_attention = attention\n        self.query_projection = nn.Linear(d_model, d_keys * n_heads)\n        self.key_projection = nn.Linear(d_model, d_keys * n_heads)\n        self.value_projection = nn.Linear(d_model, d_values * n_heads)\n        self.out_projection = nn.Linear(d_values * n_heads, d_model)\n        self.n_heads = n_heads\n\n    def forward(self, queries, keys, values, attn_mask, tau=None, delta=None):\n        B, L, _ = queries.shape\n        _, S, _ = keys.shape\n        H = self.n_heads\n\n        queries = self.query_projection(queries).view(B, L, H, -1)\n        keys = self.key_projection(keys).view(B, S, H, -1)\n        values = self.value_projection(values).view(B, S, H, -1)\n\n        out, attn = self.inner_attention(\n            queries,\n            keys,\n            values,\n            attn_mask,\n            tau=tau,\n            delta=delta\n        )\n        out = out.view(B, L, -1)\n\n        return self.out_projection(out), attn\n\n\n# class ReformerLayer(nn.Module):\n#     def __init__(self, attention, d_model, n_heads, d_keys=None,\n#                  d_values=None, causal=False, bucket_size=4, n_hashes=4):\n#  
       super().__init__()\n#         self.bucket_size = bucket_size\n#         self.attn = LSHSelfAttention(\n#             dim=d_model,\n#             heads=n_heads,\n#             bucket_size=bucket_size,\n#             n_hashes=n_hashes,\n#             causal=causal\n#         )\n#\n#     def fit_length(self, queries):\n#         # inside reformer: assert N % (bucket_size * 2) == 0\n#         B, N, C = queries.shape\n#         if N % (self.bucket_size * 2) == 0:\n#             return queries\n#         else:\n#             # fill the time series\n#             fill_len = (self.bucket_size * 2) - (N % (self.bucket_size * 2))\n#             return torch.cat([queries, torch.zeros([B, fill_len, C]).to(queries.device)], dim=1)\n#\n#     def forward(self, queries, keys, values, attn_mask, tau, delta):\n#         # in Reformer: defalut queries=keys\n#         B, N, C = queries.shape\n#         queries = self.attn(self.fit_length(queries))[:, :N, :]\n#         return queries, None\n\n\nclass TwoStageAttentionLayer(nn.Module):\n    '''\n    The Two Stage Attention (TSA) Layer\n    input/output shape: [batch_size, Data_dim(D), Seg_num(L), d_model]\n    '''\n\n    def __init__(self, configs,\n                 seg_num, factor, d_model, n_heads, d_ff=None, dropout=0.1):\n        super(TwoStageAttentionLayer, self).__init__()\n        d_ff = d_ff or 4 * d_model\n        self.time_attention = AttentionLayer(FullAttention(False, configs.factor, attention_dropout=configs.dropout,\n                                                           output_attention=configs.output_attention), d_model, n_heads)\n        self.dim_sender = AttentionLayer(FullAttention(False, configs.factor, attention_dropout=configs.dropout,\n                                                       output_attention=configs.output_attention), d_model, n_heads)\n        self.dim_receiver = AttentionLayer(FullAttention(False, configs.factor, attention_dropout=configs.dropout,\n                                      
                   output_attention=configs.output_attention), d_model, n_heads)\n        self.router = nn.Parameter(torch.randn(seg_num, factor, d_model))\n\n        self.dropout = nn.Dropout(dropout)\n\n        self.norm1 = nn.LayerNorm(d_model)\n        self.norm2 = nn.LayerNorm(d_model)\n        self.norm3 = nn.LayerNorm(d_model)\n        self.norm4 = nn.LayerNorm(d_model)\n\n        self.MLP1 = nn.Sequential(nn.Linear(d_model, d_ff),\n                                  nn.GELU(),\n                                  nn.Linear(d_ff, d_model))\n        self.MLP2 = nn.Sequential(nn.Linear(d_model, d_ff),\n                                  nn.GELU(),\n                                  nn.Linear(d_ff, d_model))\n\n    def forward(self, x, attn_mask=None, tau=None, delta=None):\n        # Cross Time Stage: Directly apply MSA to each dimension\n        batch = x.shape[0]\n        time_in = rearrange(x, 'b ts_d seg_num d_model -> (b ts_d) seg_num d_model')\n        time_enc, attn = self.time_attention(\n            time_in, time_in, time_in, attn_mask=None, tau=None, delta=None\n        )\n        dim_in = time_in + self.dropout(time_enc)\n        dim_in = self.norm1(dim_in)\n        dim_in = dim_in + self.dropout(self.MLP1(dim_in))\n        dim_in = self.norm2(dim_in)\n\n        # Cross Dimension Stage: use a small set of learnable vectors to aggregate and distribute messages to build the D-to-D connection\n        dim_send = rearrange(dim_in, '(b ts_d) seg_num d_model -> (b seg_num) ts_d d_model', b=batch)\n        batch_router = repeat(self.router, 'seg_num factor d_model -> (repeat seg_num) factor d_model', repeat=batch)\n        dim_buffer, attn = self.dim_sender(batch_router, dim_send, dim_send, attn_mask=None, tau=None, delta=None)\n        dim_receive, attn = self.dim_receiver(dim_send, dim_buffer, dim_buffer, attn_mask=None, tau=None, delta=None)\n        dim_enc = dim_send + self.dropout(dim_receive)\n        dim_enc = self.norm3(dim_enc)\n        dim_enc = 
dim_enc + self.dropout(self.MLP2(dim_enc))\n        dim_enc = self.norm4(dim_enc)\n\n        final_out = rearrange(dim_enc, '(b seg_num) ts_d d_model -> b ts_d seg_num d_model', b=batch)\n\n        return final_out\n"
  },
  {
    "path": "ts_classification_methods/timesnet/models/TimesNet.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.fft\nfrom timesnet.models.Embed import DataEmbedding\nfrom timesnet.models.Conv_Blocks import Inception_Block_V1\n\n\ndef FFT_for_Period(x, k=2):\n    # [B, T, C]\n    xf = torch.fft.rfft(x, dim=1)\n    # find period by amplitudes\n    frequency_list = abs(xf).mean(0).mean(-1)\n    frequency_list[0] = 0\n    _, top_list = torch.topk(frequency_list, k)\n    top_list = top_list.detach().cpu().numpy()\n    period = x.shape[1] // top_list\n\n    # print(\"period.shape = \", period.shape, top_list.shape, top_list, period)\n    return period, abs(xf).mean(-1)[:, top_list]\n\n\nclass TimesBlock(nn.Module):\n    def __init__(self, configs):\n        super(TimesBlock, self).__init__()\n        self.seq_len = configs.seq_len\n        self.pred_len = configs.pred_len\n        self.k = configs.top_k\n        # parameter-efficient design\n        self.conv = nn.Sequential(\n            Inception_Block_V1(configs.d_model, configs.d_ff,\n                               num_kernels=configs.num_kernels),\n            nn.GELU(),\n            Inception_Block_V1(configs.d_ff, configs.d_model,\n                               num_kernels=configs.num_kernels)\n        )\n\n    def forward(self, x):\n        # print(\"Input shape:\", x.shape)\n        B, T, N = x.size()\n        period_list, period_weight = FFT_for_Period(x, self.k)\n\n        # print(\"period_list shape = \", period_list.shape, period_list)\n\n        # print(\"period_list period_weight shape:\", period_list.shape, period_weight.shape, self.k, self.seq_len, self.pred_len)\n\n        res = []\n        for i in range(self.k):\n            period = period_list[i]\n            # padding\n            if (self.seq_len + self.pred_len) % period != 0:\n                length = (\n                                 ((self.seq_len + self.pred_len) // period) + 1) * period\n\n                # print(\"length = \", length, self.seq_len, 
self.pred_len, period)\n                padding = torch.zeros([x.shape[0], (length - (self.seq_len + self.pred_len)), x.shape[2]]).to(x.device)\n\n                # print(\"padding x shape = \", padding.shape, x.shape)\n                out = torch.cat([x, padding], dim=1)\n                # print(\"padding out shape = \", out.shape)\n            else:\n                length = (self.seq_len + self.pred_len)\n                out = x\n\n            # print(\"out.shape = \", out.shape, length, period, length // period, N )\n            # reshape\n            out = out.reshape(B, length // period, period,\n                              N).permute(0, 3, 1, 2).contiguous()\n            # 2D conv: from 1d Variation to 2d Variation\n            out = self.conv(out)\n            # reshape back\n            out = out.permute(0, 2, 3, 1).reshape(B, -1, N)\n            res.append(out[:, :(self.seq_len + self.pred_len), :])\n        res = torch.stack(res, dim=-1)\n        # adaptive aggregation\n        period_weight = F.softmax(period_weight, dim=1)\n        period_weight = period_weight.unsqueeze(\n            1).unsqueeze(1).repeat(1, T, N, 1)\n        res = torch.sum(res * period_weight, -1)\n        # residual connection\n        res = res + x\n        return res\n\n\nclass Model(nn.Module):\n    \"\"\"\n    Paper link: https://openreview.net/pdf?id=ju_Uqw384Oq\n    \"\"\"\n\n    def __init__(self, configs):\n        super(Model, self).__init__()\n        self.configs = configs\n        self.task_name = configs.task_name\n        self.seq_len = configs.seq_len\n        self.label_len = configs.label_len\n        self.pred_len = configs.pred_len\n        self.model = nn.ModuleList([TimesBlock(configs)\n                                    for _ in range(configs.e_layers)])\n        self.enc_embedding = DataEmbedding(configs.enc_in, configs.d_model, configs.embed, configs.freq,\n                                           configs.dropout)\n        self.layer = 
configs.e_layers\n        self.layer_norm = nn.LayerNorm(configs.d_model)\n        if self.task_name == 'long_term_forecast' or self.task_name == 'short_term_forecast':\n            self.predict_linear = nn.Linear(\n                self.seq_len, self.pred_len + self.seq_len)\n            self.projection = nn.Linear(\n                configs.d_model, configs.c_out, bias=True)\n        if self.task_name == 'imputation' or self.task_name == 'anomaly_detection':\n            self.projection = nn.Linear(\n                configs.d_model, configs.c_out, bias=True)\n        if self.task_name == 'classification':\n            self.act = F.gelu\n            self.dropout = nn.Dropout(configs.dropout)\n            self.projection = nn.Linear(\n                configs.d_model * configs.seq_len, configs.num_classes)\n\n    def forecast(self, x_enc, x_mark_enc, x_dec, x_mark_dec):\n        # Normalization from Non-stationary Transformer\n        means = x_enc.mean(1, keepdim=True).detach()\n        x_enc = x_enc - means\n        stdev = torch.sqrt(\n            torch.var(x_enc, dim=1, keepdim=True, unbiased=False) + 1e-5)\n        x_enc /= stdev\n\n        # embedding\n        enc_out = self.enc_embedding(x_enc, x_mark_enc)  # [B,T,C]\n        enc_out = self.predict_linear(enc_out.permute(0, 2, 1)).permute(\n            0, 2, 1)  # align temporal dimension\n        # TimesNet\n        for i in range(self.layer):\n            enc_out = self.layer_norm(self.model[i](enc_out))\n        # porject back\n        dec_out = self.projection(enc_out)\n\n        # De-Normalization from Non-stationary Transformer\n        dec_out = dec_out * \\\n                  (stdev[:, 0, :].unsqueeze(1).repeat(\n                      1, self.pred_len + self.seq_len, 1))\n        dec_out = dec_out + \\\n                  (means[:, 0, :].unsqueeze(1).repeat(\n                      1, self.pred_len + self.seq_len, 1))\n        return dec_out\n\n    def imputation(self, x_enc, x_mark_enc, x_dec, 
x_mark_dec, mask):\n        # Normalization from Non-stationary Transformer\n        means = torch.sum(x_enc, dim=1) / torch.sum(mask == 1, dim=1)\n        means = means.unsqueeze(1).detach()\n        x_enc = x_enc - means\n        x_enc = x_enc.masked_fill(mask == 0, 0)\n        stdev = torch.sqrt(torch.sum(x_enc * x_enc, dim=1) /\n                           torch.sum(mask == 1, dim=1) + 1e-5)\n        stdev = stdev.unsqueeze(1).detach()\n        x_enc /= stdev\n\n        # embedding\n        enc_out = self.enc_embedding(x_enc, x_mark_enc)  # [B,T,C]\n        # TimesNet\n        for i in range(self.layer):\n            enc_out = self.layer_norm(self.model[i](enc_out))\n        # porject back\n        dec_out = self.projection(enc_out)\n\n        # De-Normalization from Non-stationary Transformer\n        dec_out = dec_out * \\\n                  (stdev[:, 0, :].unsqueeze(1).repeat(\n                      1, self.pred_len + self.seq_len, 1))\n        dec_out = dec_out + \\\n                  (means[:, 0, :].unsqueeze(1).repeat(\n                      1, self.pred_len + self.seq_len, 1))\n        return dec_out\n\n    def anomaly_detection(self, x_enc):\n        # Normalization from Non-stationary Transformer\n        means = x_enc.mean(1, keepdim=True).detach()\n        x_enc = x_enc - means\n        stdev = torch.sqrt(\n            torch.var(x_enc, dim=1, keepdim=True, unbiased=False) + 1e-5)\n        x_enc /= stdev\n\n        # embedding\n        enc_out = self.enc_embedding(x_enc, None)  # [B,T,C]\n        # TimesNet\n        for i in range(self.layer):\n            enc_out = self.layer_norm(self.model[i](enc_out))\n        # porject back\n        dec_out = self.projection(enc_out)\n\n        # De-Normalization from Non-stationary Transformer\n        dec_out = dec_out * \\\n                  (stdev[:, 0, :].unsqueeze(1).repeat(\n                      1, self.pred_len + self.seq_len, 1))\n        dec_out = dec_out + \\\n                  (means[:, 0, 
:].unsqueeze(1).repeat(\n                      1, self.pred_len + self.seq_len, 1))\n        return dec_out\n\n    def classification(self, x_enc, x_mark_enc):\n        # embedding\n        enc_out = self.enc_embedding(x_enc, None)  # [B,T,C]\n        # TimesNet\n        for i in range(self.layer):\n            enc_out = self.layer_norm(self.model[i](enc_out))\n\n        # Output\n        # the output transformer encoder/decoder embeddings don't include non-linearity\n        output = self.act(enc_out)\n        output = self.dropout(output)\n        # zero-out padding embeddings\n        output = output * x_mark_enc.unsqueeze(-1)\n        # (batch_size, seq_length * d_model)\n        output = output.reshape(output.shape[0], -1)\n        output = self.projection(output)  # (batch_size, num_classes)\n        return output\n\n    def forward(self, x_enc, x_mark_enc, x_dec=None, x_mark_dec=None, mask=None):\n        if self.task_name == 'long_term_forecast' or self.task_name == 'short_term_forecast':\n            dec_out = self.forecast(x_enc, x_mark_enc, x_dec, x_mark_dec)\n            return dec_out[:, -self.pred_len:, :]  # [B, L, D]\n        if self.task_name == 'imputation':\n            dec_out = self.imputation(\n                x_enc, x_mark_enc, x_dec, x_mark_dec, mask)\n            return dec_out  # [B, L, D]\n        if self.task_name == 'anomaly_detection':\n            dec_out = self.anomaly_detection(x_enc)\n            return dec_out  # [B, L, D]\n        if self.task_name == 'classification':\n            dec_out = self.classification(x_enc, x_mark_enc)\n            return dec_out  # [B, N]\n        return None\n"
  },
  {
    "path": "ts_classification_methods/timesnet/models/Transformer.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.fft\nfrom timesnet.models.Embed import DataEmbedding\nfrom timesnet.models.Transformer_EncDec import Decoder, DecoderLayer, Encoder, EncoderLayer\nfrom timesnet.models.SelfAttention_Family import FullAttention, AttentionLayer\n\n\nclass Model(nn.Module):\n    \"\"\"\n    Vanilla Transformer\n    with O(L^2) complexity\n    Paper link: https://proceedings.neurips.cc/paper/2017/file/3f5ee243547dee91fbd053c1c4a845aa-Paper.pdf\n    \"\"\"\n\n    def __init__(self, configs):\n        super(Model, self).__init__()\n        self.task_name = configs.task_name\n        self.pred_len = configs.pred_len\n        self.output_attention = configs.output_attention\n        # Embedding\n        self.enc_embedding = DataEmbedding(configs.enc_in, configs.d_model, configs.embed, configs.freq,\n                                           configs.dropout)\n        # Encoder\n        self.encoder = Encoder(\n            [\n                EncoderLayer(\n                    AttentionLayer(\n                        FullAttention(False, configs.factor, attention_dropout=configs.dropout,\n                                      output_attention=configs.output_attention), configs.d_model, configs.n_heads),\n                    configs.d_model,\n                    configs.d_ff,\n                    dropout=configs.dropout,\n                    activation=configs.activation\n                ) for l in range(configs.e_layers)\n            ],\n            norm_layer=torch.nn.LayerNorm(configs.d_model)\n        )\n        # Decoder\n        if self.task_name == 'long_term_forecast' or self.task_name == 'short_term_forecast':\n            self.dec_embedding = DataEmbedding(configs.dec_in, configs.d_model, configs.embed, configs.freq,\n                                               configs.dropout)\n            self.decoder = Decoder(\n                [\n                    DecoderLayer(\n               
         AttentionLayer(\n                            FullAttention(True, configs.factor, attention_dropout=configs.dropout,\n                                          output_attention=False),\n                            configs.d_model, configs.n_heads),\n                        AttentionLayer(\n                            FullAttention(False, configs.factor, attention_dropout=configs.dropout,\n                                          output_attention=False),\n                            configs.d_model, configs.n_heads),\n                        configs.d_model,\n                        configs.d_ff,\n                        dropout=configs.dropout,\n                        activation=configs.activation,\n                    )\n                    for l in range(configs.d_layers)\n                ],\n                norm_layer=torch.nn.LayerNorm(configs.d_model),\n                projection=nn.Linear(configs.d_model, configs.c_out, bias=True)\n            )\n        if self.task_name == 'imputation':\n            self.projection = nn.Linear(configs.d_model, configs.c_out, bias=True)\n        if self.task_name == 'anomaly_detection':\n            self.projection = nn.Linear(configs.d_model, configs.c_out, bias=True)\n        if self.task_name == 'classification':\n            self.act = F.gelu\n            self.dropout = nn.Dropout(configs.dropout)\n            self.projection = nn.Linear(configs.d_model * configs.seq_len, configs.num_classes)\n\n    def forecast(self, x_enc, x_mark_enc, x_dec, x_mark_dec):\n        # Embedding\n        enc_out = self.enc_embedding(x_enc, x_mark_enc)\n        enc_out, attns = self.encoder(enc_out, attn_mask=None)\n\n        dec_out = self.dec_embedding(x_dec, x_mark_dec)\n        dec_out = self.decoder(dec_out, enc_out, x_mask=None, cross_mask=None)\n        return dec_out\n\n    def imputation(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask):\n        # Embedding\n        enc_out = self.enc_embedding(x_enc, x_mark_enc)\n       
 enc_out, attns = self.encoder(enc_out, attn_mask=None)\n\n        dec_out = self.projection(enc_out)\n        return dec_out\n\n    def anomaly_detection(self, x_enc):\n        # Embedding\n        enc_out = self.enc_embedding(x_enc, None)\n        enc_out, attns = self.encoder(enc_out, attn_mask=None)\n\n        dec_out = self.projection(enc_out)\n        return dec_out\n\n    def classification(self, x_enc, x_mark_enc):\n\n        # print(\"1 x_enc.shape = \", x_enc.shape, x_mark_enc.shape)\n        # Embedding\n        enc_out = self.enc_embedding(x_enc, None)\n        # print(\"2 x_enc.shape = \", enc_out.shape)\n        enc_out, attns = self.encoder(enc_out, attn_mask=None)\n\n        # print(\"3 enc_out.shape = \", enc_out.shape)\n\n\n        # Output\n        output = self.act(enc_out)  # the output transformer encoder/decoder embeddings don't include non-linearity\n\n        # print(\"4 output.shape = \", output.shape)\n\n        output = self.dropout(output)\n\n        # print(\"5 output.shape = \", output.shape)\n\n        output = output * x_mark_enc.unsqueeze(-1)  # zero-out padding embeddings\n\n        # print(\"6 output.shape = \", output.shape)\n\n\n        output = output.reshape(output.shape[0], -1)  # (batch_size, seq_length * d_model)\n\n        # print(\"7 output.shape = \", output.shape)\n\n        output = self.projection(output)  # (batch_size, num_classes)\n        return output\n\n    def forward(self, x_enc, x_mark_enc, x_dec=None, x_mark_dec=None, mask=None):\n        if self.task_name == 'long_term_forecast' or self.task_name == 'short_term_forecast':\n            dec_out = self.forecast(x_enc, x_mark_enc, x_dec, x_mark_dec)\n            return dec_out[:, -self.pred_len:, :]  # [B, L, D]\n        if self.task_name == 'imputation':\n            dec_out = self.imputation(x_enc, x_mark_enc, x_dec, x_mark_dec, mask)\n            return dec_out  # [B, L, D]\n        if self.task_name == 'anomaly_detection':\n            dec_out = 
self.anomaly_detection(x_enc)\n            return dec_out  # [B, L, D]\n        if self.task_name == 'classification':\n            dec_out = self.classification(x_enc, x_mark_enc)\n            return dec_out  # [B, N]\n        return None\n"
  },
  {
    "path": "ts_classification_methods/timesnet/models/Transformer_EncDec.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass ConvLayer(nn.Module):\n    def __init__(self, c_in):\n        super(ConvLayer, self).__init__()\n        self.downConv = nn.Conv1d(in_channels=c_in,\n                                  out_channels=c_in,\n                                  kernel_size=3,\n                                  padding=2,\n                                  padding_mode='circular')\n        self.norm = nn.BatchNorm1d(c_in)\n        self.activation = nn.ELU()\n        self.maxPool = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)\n\n    def forward(self, x):\n        x = self.downConv(x.permute(0, 2, 1))\n        x = self.norm(x)\n        x = self.activation(x)\n        x = self.maxPool(x)\n        x = x.transpose(1, 2)\n        return x\n\n\nclass EncoderLayer(nn.Module):\n    def __init__(self, attention, d_model, d_ff=None, dropout=0.1, activation=\"relu\"):\n        super(EncoderLayer, self).__init__()\n        d_ff = d_ff or 4 * d_model\n        self.attention = attention\n        self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1)\n        self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1)\n        self.norm1 = nn.LayerNorm(d_model)\n        self.norm2 = nn.LayerNorm(d_model)\n        self.dropout = nn.Dropout(dropout)\n        self.activation = F.relu if activation == \"relu\" else F.gelu\n\n    def forward(self, x, attn_mask=None, tau=None, delta=None):\n        new_x, attn = self.attention(\n            x, x, x,\n            attn_mask=attn_mask,\n            tau=tau, delta=delta\n        )\n        x = x + self.dropout(new_x)\n\n        y = x = self.norm1(x)\n        y = self.dropout(self.activation(self.conv1(y.transpose(-1, 1))))\n        y = self.dropout(self.conv2(y).transpose(-1, 1))\n\n        return self.norm2(x + y), attn\n\n\nclass Encoder(nn.Module):\n    def __init__(self, attn_layers, conv_layers=None, norm_layer=None):\n   
     super(Encoder, self).__init__()\n        self.attn_layers = nn.ModuleList(attn_layers)\n        self.conv_layers = nn.ModuleList(conv_layers) if conv_layers is not None else None\n        self.norm = norm_layer\n\n    def forward(self, x, attn_mask=None, tau=None, delta=None):\n        # x [B, L, D]\n        attns = []\n        if self.conv_layers is not None:\n            for i, (attn_layer, conv_layer) in enumerate(zip(self.attn_layers, self.conv_layers)):\n                delta = delta if i == 0 else None\n                x, attn = attn_layer(x, attn_mask=attn_mask, tau=tau, delta=delta)\n                x = conv_layer(x)\n                attns.append(attn)\n            x, attn = self.attn_layers[-1](x, tau=tau, delta=None)\n            attns.append(attn)\n        else:\n            for attn_layer in self.attn_layers:\n                x, attn = attn_layer(x, attn_mask=attn_mask, tau=tau, delta=delta)\n                attns.append(attn)\n\n        if self.norm is not None:\n            x = self.norm(x)\n\n        return x, attns\n\n\nclass DecoderLayer(nn.Module):\n    def __init__(self, self_attention, cross_attention, d_model, d_ff=None,\n                 dropout=0.1, activation=\"relu\"):\n        super(DecoderLayer, self).__init__()\n        d_ff = d_ff or 4 * d_model\n        self.self_attention = self_attention\n        self.cross_attention = cross_attention\n        self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1)\n        self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1)\n        self.norm1 = nn.LayerNorm(d_model)\n        self.norm2 = nn.LayerNorm(d_model)\n        self.norm3 = nn.LayerNorm(d_model)\n        self.dropout = nn.Dropout(dropout)\n        self.activation = F.relu if activation == \"relu\" else F.gelu\n\n    def forward(self, x, cross, x_mask=None, cross_mask=None, tau=None, delta=None):\n        x = x + self.dropout(self.self_attention(\n            x, x, x,\n            
attn_mask=x_mask,\n            tau=tau, delta=None\n        )[0])\n        x = self.norm1(x)\n\n        x = x + self.dropout(self.cross_attention(\n            x, cross, cross,\n            attn_mask=cross_mask,\n            tau=tau, delta=delta\n        )[0])\n\n        y = x = self.norm2(x)\n        y = self.dropout(self.activation(self.conv1(y.transpose(-1, 1))))\n        y = self.dropout(self.conv2(y).transpose(-1, 1))\n\n        return self.norm3(x + y)\n\n\nclass Decoder(nn.Module):\n    def __init__(self, layers, norm_layer=None, projection=None):\n        super(Decoder, self).__init__()\n        self.layers = nn.ModuleList(layers)\n        self.norm = norm_layer\n        self.projection = projection\n\n    def forward(self, x, cross, x_mask=None, cross_mask=None, tau=None, delta=None):\n        for layer in self.layers:\n            x = layer(x, cross, x_mask=x_mask, cross_mask=cross_mask, tau=tau, delta=delta)\n\n        if self.norm is not None:\n            x = self.norm(x)\n\n        if self.projection is not None:\n            x = self.projection(x)\n        return x\n"
  },
  {
    "path": "ts_classification_methods/timesnet/models/__init__.py",
    "content": ""
  },
  {
    "path": "ts_classification_methods/timesnet/scripts/generator_timesnet.py",
    "content": "uea_all = ['ArticularyWordRecognition', 'AtrialFibrillation', 'BasicMotions', 'CharacterTrajectories',\n           'Cricket', 'DuckDuckGeese', 'EigenWorms', 'Epilepsy', 'EthanolConcentration', 'ERing',\n           'FaceDetection', 'FingerMovements', 'HandMovementDirection', 'Handwriting',\n           'Heartbeat', 'InsectWingbeat', 'JapaneseVowels', 'Libras', 'LSST', 'MotorImagery',\n           'NATOPS', 'PenDigits', 'PEMS-SF', 'PhonemeSpectra', 'RacketSports', 'SelfRegulationSCP1',\n           'SelfRegulationSCP2', 'SpokenArabicDigits', 'StandWalkJump', 'UWaveGestureLibrary']\n\n\nucr_dataset = ['ACSF1', 'Adiac', 'AllGestureWiimoteX', 'AllGestureWiimoteY', 'AllGestureWiimoteZ', 'ArrowHead', 'BME',\n               'Beef',\n               'BeetleFly', 'BirdChicken', 'CBF', 'Car', 'Chinatown', 'ChlorineConcentration', 'CinCECGTorso', 'Coffee',\n               'Computers',\n               'CricketX', 'CricketY', 'CricketZ', 'Crop', 'DiatomSizeReduction', 'DistalPhalanxOutlineAgeGroup',\n               'DistalPhalanxOutlineCorrect', 'DistalPhalanxTW', 'DodgerLoopDay', 'DodgerLoopGame', 'DodgerLoopWeekend',\n               'ECG200', 'ECG5000', 'ECGFiveDays', 'EOGHorizontalSignal', 'EOGVerticalSignal', 'Earthquakes',\n               'ElectricDevices',\n               'EthanolLevel', 'FaceAll', 'FaceFour', 'FacesUCR', 'FiftyWords', 'Fish', 'FordA', 'FordB',\n               'FreezerRegularTrain',\n               'FreezerSmallTrain', 'Fungi', 'GestureMidAirD1', 'GestureMidAirD2', 'GestureMidAirD3', 'GesturePebbleZ1',\n               'GesturePebbleZ2', 'GunPoint', 'GunPointAgeSpan', 'GunPointMaleVersusFemale', 'GunPointOldVersusYoung',\n               'Ham',\n               'HandOutlines', 'Haptics', 'Herring', 'HouseTwenty', 'InlineSkate', 'InsectEPGRegularTrain',\n               'InsectEPGSmallTrain',\n               'InsectWingbeatSound', 'ItalyPowerDemand', 'LargeKitchenAppliances', 'Lightning2', 'Lightning7',\n               'Mallat', 'Meat',\n            
   'MedicalImages', 'MelbournePedestrian', 'MiddlePhalanxOutlineAgeGroup', 'MiddlePhalanxOutlineCorrect',\n               'MiddlePhalanxTW', 'MixedShapesRegularTrain', 'MixedShapesSmallTrain', 'MoteStrain',\n               'NonInvasiveFetalECGThorax1',\n               'NonInvasiveFetalECGThorax2', 'OSULeaf', 'OliveOil', 'PLAID', 'PhalangesOutlinesCorrect', 'Phoneme',\n               'PickupGestureWiimoteZ', 'PigAirwayPressure', 'PigArtPressure', 'PigCVP', 'Plane', 'PowerCons',\n               'ProximalPhalanxOutlineAgeGroup', 'ProximalPhalanxOutlineCorrect', 'ProximalPhalanxTW',\n               'RefrigerationDevices',\n               'Rock', 'ScreenType', 'SemgHandGenderCh2', 'SemgHandMovementCh2', 'SemgHandSubjectCh2',\n               'ShakeGestureWiimoteZ',\n               'ShapeletSim', 'ShapesAll', 'SmallKitchenAppliances', 'SmoothSubspace', 'SonyAIBORobotSurface1',\n               'SonyAIBORobotSurface2', 'StarLightCurves', 'Strawberry', 'SwedishLeaf', 'Symbols', 'SyntheticControl',\n               'ToeSegmentation1', 'ToeSegmentation2', 'Trace', 'TwoLeadECG', 'TwoPatterns', 'UMD',\n               'UWaveGestureLibraryAll',\n               'UWaveGestureLibraryX', 'UWaveGestureLibraryY', 'UWaveGestureLibraryZ', 'Wafer', 'Wine', 'WordSynonyms',\n               'Worms',\n               'WormsTwoClass', 'Yoga']\n\n\ncode_main = 'main_timesnet_ucr'  ##  main_timesnet_ucr  main_timesnet\n\ni = 1\nfor dataset in ucr_dataset:\n    print(\"i = \", i, \"dataset_name = \", dataset)\n    i = i + 1\n\n    save_csv_name = code_main + '_0702_'  ##  --len_k\n\n    with open('/SSD/lz/time_series_label_noise/timesnet/scripts/timesnet_uea.sh', 'a') as f:\n        f.write('python '+ code_main + '.py ' +\n                '--dataset ' + dataset\n                + ' --epoch 1000 ' +\n                '--save_csv_name ' + save_csv_name + ' --cuda cuda:1' + ';\\n')\n\n## nohup ./scripts/timesnet_uea.sh &\n## nohup ./scripts/uea_transform.sh &"
  },
  {
    "path": "ts_classification_methods/tloss_cls/default_hyperparameters.json",
    "content": "{\n    \"batch_size\": 10,\n    \"channels\": 40,\n    \"compared_length\": null,\n    \"depth\": 10,\n    \"nb_steps\": 600,\n    \"in_channels\": 1,\n    \"kernel_size\": 3,\n    \"penalty\": null,\n    \"early_stopping\": null,\n    \"lr\": 0.001,\n    \"nb_random_samples\": 10,\n    \"negative_penalty\": 1,\n    \"out_channels\": 320,\n    \"reduced_size\": 160\n}\n"
  },
  {
    "path": "ts_classification_methods/tloss_cls/losses/__init__.py",
    "content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n\n#   http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\nimport pkgutil\n\n__all__ = []\nfor loader, module_name, is_pkg in pkgutil.walk_packages(__path__):\n    __all__.append(module_name)\n    module = loader.find_module(module_name).load_module(module_name)\n    exec('%s = module' % module_name)\n"
  },
  {
    "path": "ts_classification_methods/tloss_cls/losses/triplet_loss.py",
    "content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n\n#   http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\nimport torch\nimport numpy\n\n\nclass TripletLoss(torch.nn.modules.loss._Loss):\n    \"\"\"\n    Triplet loss for representations of time series. Optimized for training\n    sets where all time series have the same length.\n\n    Takes as input a tensor as the chosen batch to compute the loss,\n    a PyTorch module as the encoder, a 3D tensor (`B`, `C`, `L`) containing\n    the training set, where `B` is the batch size, `C` is the number of\n    channels and `L` is the length of the time series, as well as a boolean\n    which, if True, enables to save GPU memory by propagating gradients after\n    each loss term, instead of doing it after computing the whole loss.\n\n    The triplets are chosen in the following manner. First the size of the\n    positive and negative samples are randomly chosen in the range of lengths\n    of time series in the dataset. The size of the anchor time series is\n    randomly chosen with the same length upper bound but the the length of the\n    positive samples as lower bound. 
An anchor of this length is then chosen\n    randomly in the given time series of the train set, and positive samples\n    are randomly chosen among subseries of the anchor. Finally, negative\n    samples of the chosen length are randomly chosen in random time series of\n    the train set.\n\n    @param compared_length Maximum length of randomly chosen time series. If\n           None, this parameter is ignored.\n    @param nb_random_samples Number of negative samples per batch example.\n    @param negative_penalty Multiplicative coefficient for the negative sample\n           loss.\n    \"\"\"\n    def __init__(self, compared_length, nb_random_samples, negative_penalty):\n        super(TripletLoss, self).__init__()\n        self.compared_length = compared_length\n        if self.compared_length is None:\n            self.compared_length = numpy.inf\n        self.nb_random_samples = nb_random_samples\n        self.negative_penalty = negative_penalty\n\n    def forward(self, batch, encoder, train, save_memory=False):\n        batch_size = batch.size(0)\n        train_size = train.size(0)\n        length = min(self.compared_length, train.size(2))\n\n        # For each batch element, we pick nb_random_samples possible random\n        # time series in the training set (choice of batches from where the\n        # negative examples will be sampled)\n        samples = numpy.random.choice(\n            train_size, size=(self.nb_random_samples, batch_size)\n        )\n        samples = torch.LongTensor(samples)\n\n        # Choice of length of positive and negative samples\n        length_pos_neg = numpy.random.randint(1, high=length + 1)\n\n        # We choose for each batch example a random interval in the time\n        # series, which is the 'anchor'\n        random_length = numpy.random.randint(\n            length_pos_neg, high=length + 1\n        )  # Length of anchors\n        beginning_batches = numpy.random.randint(\n            0, high=length - random_length + 1, 
size=batch_size\n        )  # Start of anchors\n\n        # The positive samples are chosen at random in the chosen anchors\n        beginning_samples_pos = numpy.random.randint(\n            0, high=random_length - length_pos_neg + 1, size=batch_size\n        )  # Start of positive samples in the anchors\n        # Start of positive samples in the batch examples\n        beginning_positive = beginning_batches + beginning_samples_pos\n        # End of positive samples in the batch examples\n        end_positive = beginning_positive + length_pos_neg\n\n        # We randomly choose nb_random_samples potential negative samples for\n        # each batch example\n        beginning_samples_neg = numpy.random.randint(\n            0, high=length - length_pos_neg + 1,\n            size=(self.nb_random_samples, batch_size)\n        )\n\n        representation = encoder(torch.cat(\n            [batch[\n                j: j + 1, :,\n                beginning_batches[j]: beginning_batches[j] + random_length\n            ] for j in range(batch_size)]\n        ))  # Anchors representations\n\n        positive_representation = encoder(torch.cat(\n            [batch[\n                j: j + 1, :, end_positive[j] - length_pos_neg: end_positive[j]\n            ] for j in range(batch_size)]\n        ))  # Positive samples representations\n\n        size_representation = representation.size(1)\n        # Positive loss: -logsigmoid of dot product between anchor and positive\n        # representations\n        loss = -torch.mean(torch.nn.functional.logsigmoid(torch.bmm(\n            representation.view(batch_size, 1, size_representation),\n            positive_representation.view(batch_size, size_representation, 1)\n        )))\n\n        # If required, backward through the first computed term of the loss and\n        # free from the graph everything related to the positive sample\n        if save_memory:\n            loss.backward(retain_graph=True)\n            loss = 0\n            
del positive_representation\n            torch.cuda.empty_cache()\n\n        multiplicative_ratio = self.negative_penalty / self.nb_random_samples\n        for i in range(self.nb_random_samples):\n            # Negative loss: -logsigmoid of minus the dot product between\n            # anchor and negative representations\n            negative_representation = encoder(\n                torch.cat([train[samples[i, j]: samples[i, j] + 1][\n                    :, :,\n                    beginning_samples_neg[i, j]:\n                    beginning_samples_neg[i, j] + length_pos_neg\n                ] for j in range(batch_size)])\n            )\n            loss += multiplicative_ratio * -torch.mean(\n                torch.nn.functional.logsigmoid(-torch.bmm(\n                    representation.view(batch_size, 1, size_representation),\n                    negative_representation.view(\n                        batch_size, size_representation, 1\n                    )\n                ))\n            )\n            # If required, backward through the first computed term of the loss\n            # and free from the graph everything related to the negative sample\n            # Leaves the last backward pass to the training procedure\n            if save_memory and i != self.nb_random_samples - 1:\n                loss.backward(retain_graph=True)\n                loss = 0\n                del negative_representation\n                torch.cuda.empty_cache()\n\n        return loss\n\n\nclass TripletLossVaryingLength(torch.nn.modules.loss._Loss):\n    \"\"\"\n    Triplet loss for representations of time series where the training set\n    features time series with unequal lengths.\n\n    Takes as input a tensor as the chosen batch to compute the loss,\n    a PyTorch module as the encoder, a 3D tensor (`B`, `C`, `L`) containing the\n    training set, where `B` is the batch size, `C` is the number of channels\n    and `L` is the maximum length of the time series (NaN values 
representing\n    the end of a shorter time series), as well as a boolean which, if True,\n    enables to save GPU memory by propagating gradients after each loss term,\n    instead of doing it after computing the whole loss.\n\n    The triplets are chosen in the following manner. First the sizes of\n    positive and negative samples are randomly chosen in the range of lengths\n    of time series in the dataset. The size of the anchor time series is\n    randomly chosen with the same length upper bound but the the length of the\n    positive samples as lower bound. An anchor of this length is then chosen\n    randomly in the given time series of the train set, and positive samples\n    are randomly chosen among subseries of the anchor. Finally, negative\n    samples of the chosen length are randomly chosen in random time series of\n    the train set.\n\n    @param compared_length Maximum length of randomly chosen time series. If\n           None, this parameter is ignored.\n    @param nb_random_samples Number of negative samples per batch example.\n    @param negative_penalty Multiplicative coefficient for the negative sample\n           loss.\n    \"\"\"\n    def __init__(self, compared_length, nb_random_samples, negative_penalty):\n        super(TripletLossVaryingLength, self).__init__()\n        self.compared_length = compared_length\n        if self.compared_length is None:\n            self.compared_length = numpy.inf\n        self.nb_random_samples = nb_random_samples\n        self.negative_penalty = negative_penalty\n\n    def forward(self, batch, encoder, train, save_memory=False):\n        batch_size = batch.size(0)\n        train_size = train.size(0)\n        max_length = train.size(2)\n\n        # For each batch element, we pick nb_random_samples possible random\n        # time series in the training set (choice of batches from where the\n        # negative examples will be sampled)\n        samples = numpy.random.choice(\n            train_size, 
size=(self.nb_random_samples, batch_size)\n        )\n        samples = torch.LongTensor(samples)\n\n        # Computation of the lengths of the relevant time series\n        with torch.no_grad():\n            lengths_batch = max_length - torch.sum(\n                torch.isnan(batch[:, 0]), 1\n            ).data.cpu().numpy()\n            lengths_samples = numpy.empty(\n                (self.nb_random_samples, batch_size), dtype=int\n            )\n            for i in range(self.nb_random_samples):\n                lengths_samples[i] = max_length - torch.sum(\n                    torch.isnan(train[samples[i], 0]), 1\n                ).data.cpu().numpy()\n\n        # Choice of lengths of positive and negative samples\n        lengths_pos = numpy.empty(batch_size, dtype=int)\n        lengths_neg = numpy.empty(\n            (self.nb_random_samples, batch_size), dtype=int\n        )\n        for j in range(batch_size):\n            lengths_pos[j] = numpy.random.randint(\n                1, high=min(self.compared_length, lengths_batch[j]) + 1\n            )\n            for i in range(self.nb_random_samples):\n                lengths_neg[i, j] = numpy.random.randint(\n                    1,\n                    high=min(self.compared_length, lengths_samples[i, j]) + 1\n                )\n\n        # We choose for each batch example a random interval in the time\n        # series, which is the 'anchor'\n        random_length = numpy.array([numpy.random.randint(\n            lengths_pos[j],\n            high=min(self.compared_length, lengths_batch[j]) + 1\n        ) for j in range(batch_size)])  # Length of anchors\n        beginning_batches = numpy.array([numpy.random.randint(\n            0, high=lengths_batch[j] - random_length[j] + 1\n        ) for j in range(batch_size)])  # Start of anchors\n\n        # The positive samples are chosen at random in the chosen anchors\n        # Start of positive samples in the anchors\n        beginning_samples_pos = 
numpy.array([numpy.random.randint(\n            0, high=random_length[j] - lengths_pos[j] + 1\n        ) for j in range(batch_size)])\n        # Start of positive samples in the batch examples\n        beginning_positive = beginning_batches + beginning_samples_pos\n        # End of positive samples in the batch examples\n        end_positive = beginning_positive + lengths_pos\n\n        # We randomly choose nb_random_samples potential negative samples for\n        # each batch example\n        beginning_samples_neg = numpy.array([[numpy.random.randint(\n            0, high=lengths_samples[i, j] - lengths_neg[i, j] + 1\n        ) for j in range(batch_size)] for i in range(self.nb_random_samples)])\n\n        representation = torch.cat([encoder(\n            batch[\n                j: j + 1, :,\n                beginning_batches[j]: beginning_batches[j] + random_length[j]\n            ]\n        ) for j in range(batch_size)])  # Anchors representations\n\n        positive_representation = torch.cat([encoder(\n            batch[\n                j: j + 1, :,\n                end_positive[j] - lengths_pos[j]: end_positive[j]\n            ]\n        ) for j in range(batch_size)])  # Positive samples representations\n\n        size_representation = representation.size(1)\n        # Positive loss: -logsigmoid of dot product between anchor and positive\n        # representations\n        loss = -torch.mean(torch.nn.functional.logsigmoid(torch.bmm(\n            representation.view(batch_size, 1, size_representation),\n            positive_representation.view(batch_size, size_representation, 1)\n        )))\n\n        # If required, backward through the first computed term of the loss and\n        # free from the graph everything related to the positive sample\n        if save_memory:\n            loss.backward(retain_graph=True)\n            loss = 0\n            del positive_representation\n            torch.cuda.empty_cache()\n\n        multiplicative_ratio = 
self.negative_penalty / self.nb_random_samples\n        for i in range(self.nb_random_samples):\n            # Negative loss: -logsigmoid of minus the dot product between\n            # anchor and negative representations\n            negative_representation = torch.cat([encoder(\n                train[samples[i, j]: samples[i, j] + 1][\n                    :, :,\n                    beginning_samples_neg[i, j]:\n                    beginning_samples_neg[i, j] + lengths_neg[i, j]\n                ]\n            ) for j in range(batch_size)])\n            loss += multiplicative_ratio * -torch.mean(\n                torch.nn.functional.logsigmoid(-torch.bmm(\n                    representation.view(batch_size, 1, size_representation),\n                    negative_representation.view(\n                        batch_size, size_representation, 1\n                    )\n                ))\n            )\n            # If required, backward through the first computed term of the loss\n            # and free from the graph everything related to the negative sample\n            # Leaves the last backward pass to the training procedure\n            if save_memory and i != self.nb_random_samples - 1:\n                loss.backward(retain_graph=True)\n                loss = 0\n                del negative_representation\n                torch.cuda.empty_cache()\n\n        return loss\n"
  },
  {
    "path": "ts_classification_methods/tloss_cls/networks/__init__.py",
    "content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n\n#   http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\nimport pkgutil\n\n__all__ = []\nfor loader, module_name, is_pkg in pkgutil.walk_packages(__path__):\n    __all__.append(module_name)\n    module = loader.find_module(module_name).load_module(module_name)\n    exec('%s = module' % module_name)\n"
  },
  {
    "path": "ts_classification_methods/tloss_cls/networks/causal_cnn.py",
    "content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n\n#   http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\n# Implementation of causal CNNs partly taken and modified from\n# https://github.com/locuslab/TCN/blob/master/TCN/tcn.py, originally created\n# with the following license.\n\n# MIT License\n\n# Copyright (c) 2018 CMU Locus Lab\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\nimport torch\n\n\nclass Chomp1d(torch.nn.Module):\n    \"\"\"\n    Removes the last elements of a time series.\n\n    Takes as input a three-dimensional tensor (`B`, `C`, `L`) where `B` is the\n    batch size, `C` is the number of input channels, and `L` is the length of\n    the input. Outputs a three-dimensional tensor (`B`, `C`, `L - s`) where `s`\n    is the number of elements to remove.\n\n    @param chomp_size Number of elements to remove.\n    \"\"\"\n    def __init__(self, chomp_size):\n        super(Chomp1d, self).__init__()\n        self.chomp_size = chomp_size\n\n    def forward(self, x):\n        return x[:, :, :-self.chomp_size]\n\n\nclass SqueezeChannels(torch.nn.Module):\n    \"\"\"\n    Squeezes, in a three-dimensional tensor, the third dimension.\n    \"\"\"\n    def __init__(self):\n        super(SqueezeChannels, self).__init__()\n\n    def forward(self, x):\n        return x.squeeze(2)\n\n\nclass CausalConvolutionBlock(torch.nn.Module):\n    \"\"\"\n    Causal convolution block, composed sequentially of two causal convolutions\n    (with leaky ReLU activation functions), and a parallel residual connection.\n\n    Takes as input a three-dimensional tensor (`B`, `C`, `L`) where `B` is the\n    batch size, `C` is the number of input channels, and `L` is the length of\n    the input. 
Outputs a three-dimensional tensor (`B`, `C`, `L`).\n\n    @param in_channels Number of input channels.\n    @param out_channels Number of output channels.\n    @param kernel_size Kernel size of the applied non-residual convolutions.\n    @param dilation Dilation parameter of non-residual convolutions.\n    @param final Disables, if True, the last activation function.\n    \"\"\"\n    def __init__(self, in_channels, out_channels, kernel_size, dilation,\n                 final=False):\n        super(CausalConvolutionBlock, self).__init__()\n\n        # Computes left padding so that the applied convolutions are causal\n        padding = (kernel_size - 1) * dilation\n\n        # First causal convolution\n        conv1 = torch.nn.utils.weight_norm(torch.nn.Conv1d(\n            in_channels, out_channels, kernel_size,\n            padding=padding, dilation=dilation\n        ))\n        # The truncation makes the convolution causal\n        chomp1 = Chomp1d(padding)\n        relu1 = torch.nn.LeakyReLU()\n\n        # Second causal convolution\n        conv2 = torch.nn.utils.weight_norm(torch.nn.Conv1d(\n            out_channels, out_channels, kernel_size,\n            padding=padding, dilation=dilation\n        ))\n        chomp2 = Chomp1d(padding)\n        relu2 = torch.nn.LeakyReLU()\n\n        # Causal network\n        self.causal = torch.nn.Sequential(\n            conv1, chomp1, relu1, conv2, chomp2, relu2\n        )\n\n        # Residual connection\n        self.upordownsample = torch.nn.Conv1d(\n            in_channels, out_channels, 1\n        ) if in_channels != out_channels else None\n\n        # Final activation function\n        self.relu = torch.nn.LeakyReLU() if final else None\n\n    def forward(self, x):\n        out_causal = self.causal(x)\n        res = x if self.upordownsample is None else self.upordownsample(x)\n        if self.relu is None:\n            return out_causal + res\n        else:\n            return self.relu(out_causal + res)\n\n\nclass 
CausalCNN(torch.nn.Module):\n    \"\"\"\n    Causal CNN, composed of a sequence of causal convolution blocks.\n\n    Takes as input a three-dimensional tensor (`B`, `C`, `L`) where `B` is the\n    batch size, `C` is the number of input channels, and `L` is the length of\n    the input. Outputs a three-dimensional tensor (`B`, `C_out`, `L`).\n\n    @param in_channels Number of input channels.\n    @param channels Number of channels processed in the network and of output\n           channels.\n    @param depth Depth of the network.\n    @param out_channels Number of output channels.\n    @param kernel_size Kernel size of the applied non-residual convolutions.\n    \"\"\"\n    def __init__(self, in_channels, channels, depth, out_channels,\n                 kernel_size):\n        super(CausalCNN, self).__init__()\n\n        layers = []  # List of causal convolution blocks\n        dilation_size = 1  # Initial dilation size\n\n        for i in range(depth):\n            in_channels_block = in_channels if i == 0 else channels\n            layers += [CausalConvolutionBlock(\n                in_channels_block, channels, kernel_size, dilation_size\n            )]\n            dilation_size *= 2  # Doubles the dilation size at each step\n\n        # Last layer\n        layers += [CausalConvolutionBlock(\n            channels, out_channels, kernel_size, dilation_size\n        )]\n\n        self.network = torch.nn.Sequential(*layers)\n\n    def forward(self, x):\n        return self.network(x)\n\n\nclass CausalCNNEncoder(torch.nn.Module):\n    \"\"\"\n    Encoder of a time series using a causal CNN: the computed representation is\n    the output of a fully connected layer applied to the output of an adaptive\n    max pooling layer applied on top of the causal CNN, which reduces the\n    length of the time series to a fixed size.\n\n    Takes as input a three-dimensional tensor (`B`, `C`, `L`) where `B` is the\n    batch size, `C` is the number of input channels, and `L` is the 
length of\n    the input. Outputs a two-dimensional tensor (`B`, `C`).\n\n    @param in_channels Number of input channels.\n    @param channels Number of channels manipulated in the causal CNN.\n    @param depth Depth of the causal CNN.\n    @param reduced_size Fixed length to which the output time series of the\n           causal CNN is reduced.\n    @param out_channels Number of output channels.\n    @param kernel_size Kernel size of the applied non-residual convolutions.\n    \"\"\"\n    def __init__(self, in_channels, channels, depth, reduced_size,\n                 out_channels, kernel_size):\n        super(CausalCNNEncoder, self).__init__()\n        causal_cnn = CausalCNN(\n            in_channels, channels, depth, reduced_size, kernel_size\n        )\n        reduce_size = torch.nn.AdaptiveMaxPool1d(1)\n        squeeze = SqueezeChannels()  # Squeezes the third dimension (time)\n        linear = torch.nn.Linear(reduced_size, out_channels)\n        self.network = torch.nn.Sequential(\n            causal_cnn, reduce_size, squeeze, linear\n        )\n\n    def forward(self, x):\n        return self.network(x)\n"
  },
  {
    "path": "ts_classification_methods/tloss_cls/networks/lstm.py",
    "content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n\n#   http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\nimport torch\n\n\nclass LSTMEncoder(torch.nn.Module):\n    \"\"\"\n    Encoder of a time series using an LSTM, computing a linear transformation\n    of the output of an LSTM\n\n    Takes as input a three-dimensional tensor (`B`, `C`, `L`) where `B` is the\n    batch size, `C` is the number of input channels, and `L` is the length of\n    the input. Outputs a two-dimensional tensor (`B`, `C`).\n\n    Only works for one-dimensional time series.\n    \"\"\"\n    def __init__(self):\n        super(LSTMEncoder, self).__init__()\n        self.lstm = torch.nn.LSTM(\n            input_size=1, hidden_size=256, num_layers=2\n        )\n        self.linear = torch.nn.Linear(256, 160)\n\n    def forward(self, x):\n        return self.linear(self.lstm(x.permute(2, 0, 1))[0][-1])\n"
  },
  {
    "path": "ts_classification_methods/tloss_cls/scikit_wrappers.py",
    "content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n\n#   http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\nimport math\nimport numpy\nimport torch\nimport sklearn\nimport sklearn.svm\nimport sklearn.externals\nimport sklearn.model_selection\n\nimport utils\nimport losses\nimport networks\n\n \n\n\nclass TimeSeriesEncoderClassifier(sklearn.base.BaseEstimator,\n                                  sklearn.base.ClassifierMixin):\n    \"\"\"\n    \"Virtual\" class to wrap an encoder of time series as a PyTorch module and\n    a SVM classifier with RBF kernel on top of its computed representations in\n    a scikit-learn class.\n\n    All inheriting classes should implement the get_params and set_params\n    methods, as in the recommendations of scikit-learn.\n\n    @param compared_length Maximum length of randomly chosen time series. 
If\n           None, this parameter is ignored.\n    @param nb_random_samples Number of randomly chosen intervals to select the\n           final negative sample in the loss.\n    @param negative_penalty Multiplicative coefficient for the negative sample\n           loss.\n    @param batch_size Batch size used during the training of the encoder.\n    @param nb_steps Number of optimization steps to perform for the training of\n           the encoder.\n    @param lr learning rate of the Adam optimizer used to train the encoder.\n    @param penalty Penalty term for the SVM classifier. If None and if the\n           number of samples is high enough, performs a hyperparameter search\n           to find a suitable constant.\n    @param early_stopping Enables, if not None, early stopping heuristic\n           for the training of the representations, based on the final\n           score. Representations are still learned unsupervisedly in this\n           case. If the number of samples per class is no more than 10,\n           disables this heuristic. 
If not None, accepts an integer\n           representing the patience of the early stopping strategy.\n    @param encoder Encoder PyTorch module.\n    @param params Dictionaries of the parameters of the encoder.\n    @param in_channels Number of input channels of the time series.\n    @param cuda Transfers, if True, all computations to the GPU.\n    @param gpu GPU index to use, if CUDA is enabled.\n    \"\"\"\n    def __init__(self, compared_length, nb_random_samples, negative_penalty,\n                 batch_size, nb_steps, lr, penalty, early_stopping,\n                 encoder, params, in_channels, out_channels, cuda=False,\n                 gpu=0):\n        self.architecture = ''\n        self.cuda = cuda\n        self.gpu = gpu\n        self.batch_size = batch_size\n        self.nb_steps = nb_steps\n        self.lr = lr\n        self.penalty = penalty\n        self.early_stopping = early_stopping\n        self.encoder = encoder\n        self.params = params\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n        self.loss = losses.triplet_loss.TripletLoss(\n            compared_length, nb_random_samples, negative_penalty\n        )\n        self.loss_varying = losses.triplet_loss.TripletLossVaryingLength(\n            compared_length, nb_random_samples, negative_penalty\n        )\n        self.classifier = sklearn.svm.SVC()\n        self.optimizer = torch.optim.Adam(self.encoder.parameters(), lr=lr)\n\n    def save_encoder(self, prefix_file):\n        \"\"\"\n        Saves the encoder and the SVM classifier.\n\n        @param prefix_file Path and prefix of the file where the models should\n               be saved (at '$(prefix_file)_$(architecture)_encoder.pth').\n        \"\"\"\n        torch.save(\n            self.encoder.state_dict(),\n            prefix_file + '_' + self.architecture + '_encoder.pth'\n        )\n\n    def save(self, prefix_file):\n        \"\"\"\n        Saves the encoder and the SVM classifier.\n\n      
  @param prefix_file Path and prefix of the file where the models should\n               be saved (at '$(prefix_file)_$(architecture)_classifier.pkl' and\n               '$(prefix_file)_$(architecture)_encoder.pth').\n        \"\"\"\n        self.save_encoder(prefix_file)\n        #sklearn.externals.\n        '''\n        sklearn.externals.joblib.dump(\n            self.classifier,\n            prefix_file + '_' + self.architecture + '_classifier.pkl'\n        )\n        '''\n\n    def load_encoder(self, prefix_file):\n        \"\"\"\n        Loads an encoder.\n\n        @param prefix_file Path and prefix of the file where the model should\n               be loaded (at '$(prefix_file)_$(architecture)_encoder.pth').\n        \"\"\"\n        if self.cuda:\n            self.encoder.load_state_dict(torch.load(\n                prefix_file + '_' + self.architecture + '_encoder.pth',\n                map_location=lambda storage, loc: storage.cuda(self.gpu)\n            ))\n        else:\n            self.encoder.load_state_dict(torch.load(\n                prefix_file + '_' + self.architecture + '_encoder.pth',\n                map_location=lambda storage, loc: storage\n            ))\n\n    def load(self, prefix_file):\n        \"\"\"\n        Loads an encoder and an SVM classifier.\n\n        @param prefix_file Path and prefix of the file where the models should\n               be loaded (at '$(prefix_file)_$(architecture)_classifier.pkl'\n               and '$(prefix_file)_$(architecture)_encoder.pth').\n        \"\"\"\n        self.load_encoder(prefix_file)\n        self.classifier = sklearn.externals.joblib.load(\n            prefix_file + '_' + self.architecture + '_classifier.pkl'\n        )\n\n    def fit_classifier(self, features, y):\n        \"\"\"\n        Trains the classifier using precomputed features. 
Uses an SVM\n        classifier with RBF kernel.\n\n        @param features Computed features of the training set.\n        @param y Training labels.\n        \"\"\"\n        nb_classes = numpy.shape(numpy.unique(y, return_counts=True)[1])[0]\n        train_size = numpy.shape(features)[0]\n        # To use a 1-NN classifier, no need for model selection, simply\n        # replace the code by the following:\n        # import sklearn.neighbors\n        # self.classifier = sklearn.neighbors.KNeighborsClassifier(\n        #     n_neighbors=1\n        # )\n        # return self.classifier.fit(features, y)\n        self.classifier = sklearn.svm.SVC(\n            C=1 / self.penalty\n            if self.penalty is not None and self.penalty > 0\n            else numpy.inf,\n            gamma='scale'\n        )\n        if train_size // nb_classes < 5 or train_size < 50 or self.penalty is not None:\n            return self.classifier.fit(features, y)\n        else:\n            grid_search = sklearn.model_selection.GridSearchCV(\n                self.classifier, {\n                    'C': [\n                        0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000,\n                        numpy.inf\n                    ],\n                    'kernel': ['rbf'],\n                    'degree': [3],\n                    'gamma': ['scale'],\n                    'coef0': [0],\n                    'shrinking': [True],\n                    'probability': [False],\n                    'tol': [0.001],\n                    'cache_size': [200],\n                    'class_weight': [None],\n                    'verbose': [False],\n                    'max_iter': [10000000],\n                    'decision_function_shape': ['ovr'],\n                    'random_state': [None]\n                },\n                cv=5, n_jobs=5\n            )\n            if train_size <= 10000:\n                grid_search.fit(features, y)\n            else:\n                # If the training set is too 
large, subsample 10000 train\n                # examples\n                split = sklearn.model_selection.train_test_split(\n                    features, y,\n                    train_size=10000, random_state=0, stratify=y\n                )\n                grid_search.fit(split[0], split[2])\n            self.classifier = grid_search.best_estimator_\n            return self.classifier\n\n    def fit_encoder(self, X, y=None, save_memory=False, verbose=True):\n        \"\"\"\n        Trains the encoder unsupervisedly using the given training data.\n\n        @param X Training set.\n        @param y Training labels, used only for early stopping, if enabled. If\n               None, disables early stopping in the method.\n        @param save_memory If True, enables to save GPU memory by propagating\n               gradients after each loss term of the encoder loss, instead of\n               doing it after computing the whole loss.\n        @param verbose Enables, if True, to monitor which epoch is running in\n               the encoder training.\n        \"\"\"\n        # Check if the given time series have unequal lengths\n        varying = bool(numpy.isnan(numpy.sum(X)))\n\n        train = torch.from_numpy(X)\n        if self.cuda:\n            train = train.cuda(self.gpu)\n\n        if y is not None:\n            nb_classes = numpy.shape(numpy.unique(y, return_counts=True)[1])[0]\n            train_size = numpy.shape(X)[0]\n            ratio = train_size // nb_classes\n\n        X = torch.from_numpy(X).cuda(self.gpu)\n        train_torch_dataset = utils.Dataset(X)\n        train_generator = torch.utils.data.DataLoader(\n            train_torch_dataset, batch_size=self.batch_size, shuffle=True\n        )\n\n        max_score = 0\n        i = 0  # Number of performed optimization steps\n        epochs = 0  # Number of performed epochs\n        count = 0  # Count of number of epochs without improvement\n        # Will be true if, by enabling epoch_selection, a 
model was selected\n        # using cross-validation\n        found_best = False\n        # Encoder training\n        while i < self.nb_steps:\n            if verbose:\n                print('Epoch: ', epochs + 1)\n            for batch in train_generator:\n                '''\n                if self.cuda:\n                    batch = batch.cuda(self.gpu)\n                '''\n                self.optimizer.zero_grad()\n                if not varying:\n                    loss = self.loss(\n                        batch, self.encoder, train, save_memory=save_memory\n                    )\n                else:\n                    loss = self.loss_varying(\n                        batch, self.encoder, train, save_memory=save_memory\n                    )\n                loss.backward()\n                self.optimizer.step()\n                i += 1\n                if i >= self.nb_steps:\n                    break\n            epochs += 1\n            # Early stopping strategy\n            if self.early_stopping is not None and y is not None and (\n                ratio >= 5 and train_size >= 50\n            ):\n                # Computes the best regularization parameters\n                features = self.encode(X)\n                self.classifier = self.fit_classifier(features, y)\n                # Cross validation score\n                score = numpy.mean(sklearn.model_selection.cross_val_score(\n                    self.classifier, features, y=y, cv=5, n_jobs=5\n                ))\n                count += 1\n                # If the model is better than the previous one, update\n                if score > max_score:\n                    count = 0\n                    found_best = True\n                    max_score = score\n                    best_encoder = type(self.encoder)(**self.params)\n                    best_encoder.double()\n                    if self.cuda:\n                        best_encoder.cuda(self.gpu)\n                    
best_encoder.load_state_dict(self.encoder.state_dict())\n            if count == self.early_stopping:\n                break\n\n        # If a better model was found, use it\n        if found_best:\n            self.encoder = best_encoder\n\n        return self.encoder\n\n    def fit(self, X, y, save_memory=False, verbose=False):\n        \"\"\"\n        Trains sequentially the encoder unsupervisedly and then the classifier\n        using the given labels over the learned features.\n\n        @param X Training set.\n        @param y Training labels.\n        @param save_memory If True, enables to save GPU memory by propagating\n               gradients after each loss term of the encoder loss, instead of\n               doing it after computing the whole loss.\n        @param verbose Enables, if True, to monitor which epoch is running in\n               the encoder training.\n        \"\"\"\n        # Fitting encoder\n        self.encoder = self.fit_encoder(\n            X, y=y, save_memory=save_memory, verbose=verbose\n        )\n\n        # SVM classifier training\n        features = self.encode(X)\n        self.classifier = self.fit_classifier(features, y)\n\n        return self\n\n    def encode(self, X, batch_size=50):\n        \"\"\"\n        Outputs the representations associated to the input by the encoder.\n\n        @param X Testing set.\n        @param batch_size Size of batches used for splitting the test data to\n               avoid out of memory errors when using CUDA. 
Ignored if the\n               testing set contains time series of unequal lengths.\n        \"\"\"\n        # Check if the given time series have unequal lengths\n        varying = bool(numpy.isnan(numpy.sum(X)))\n\n        test = utils.Dataset(X)\n        test_generator = torch.utils.data.DataLoader(\n            test, batch_size=batch_size if not varying else 1\n        )\n        features = numpy.zeros((numpy.shape(X)[0], self.out_channels))\n        self.encoder = self.encoder.eval()\n\n        count = 0\n        with torch.no_grad():\n            if not varying:\n                for batch in test_generator:\n                    if self.cuda:\n                        batch = batch.cuda(self.gpu)\n                    features[\n                        count * batch_size: (count + 1) * batch_size\n                    ] = self.encoder(batch).cpu()\n                    count += 1\n            else:\n                for batch in test_generator:\n                    if self.cuda:\n                        batch = batch.cuda(self.gpu)\n                    length = batch.size(2) - torch.sum(\n                        torch.isnan(batch[0, 0])\n                    ).data.cpu().numpy()\n                    features[count: count + 1] = self.encoder(\n                        batch[:, :, :length]\n                    ).cpu()\n                    count += 1\n\n        self.encoder = self.encoder.train()\n        return features\n\n    def encode_window(self, X, window, batch_size=50, window_batch_size=10000):\n        \"\"\"\n        Outputs the representations associated to the input by the encoder,\n        for each subseries of the input of the given size (sliding window\n        representations).\n\n        @param X Testing set.\n        @param window Size of the sliding window.\n        @param batch_size Size of batches used for splitting the test data to\n               avoid out of memory errors when using CUDA.\n        @param window_batch_size Size of batches of 
windows to compute in a\n               run of encode, to save RAM.\n        \"\"\"\n        features = numpy.empty((\n                numpy.shape(X)[0], self.out_channels,\n                numpy.shape(X)[2] - window + 1\n        ))\n        masking = numpy.empty((\n            min(window_batch_size, numpy.shape(X)[2] - window + 1),\n            numpy.shape(X)[1], window\n        ))\n        for b in range(numpy.shape(X)[0]):\n            for i in range(math.ceil(\n                (numpy.shape(X)[2] - window + 1) / window_batch_size)\n            ):\n                for j in range(\n                    i * window_batch_size,\n                    min(\n                        (i + 1) * window_batch_size,\n                        numpy.shape(X)[2] - window + 1\n                    )\n                ):\n                    j0 = j - i * window_batch_size\n                    masking[j0, :, :] = X[b, :, j: j + window]\n                features[\n                    b, :, i * window_batch_size: (i + 1) * window_batch_size\n                ] = numpy.swapaxes(\n                    self.encode(masking[:j0 + 1], batch_size=batch_size), 0, 1\n                )\n        return features\n\n    def predict(self, X, batch_size=50):\n        \"\"\"\n        Outputs the class predictions for the given test data.\n\n        @param X Testing set.\n        @param batch_size Size of batches used for splitting the test data to\n               avoid out of memory errors when using CUDA. 
Ignored if the\n               testing set contains time series of unequal lengths.\n        \"\"\"\n        features = self.encode(X, batch_size=batch_size)\n        return self.classifier.predict(features)\n\n    def score(self, X, y, batch_size=50):\n        \"\"\"\n        Outputs accuracy of the SVM classifier on the given testing data.\n\n        @param X Testing set.\n        @param y Testing labels.\n        @param batch_size Size of batches used for splitting the test data to\n               avoid out of memory errors when using CUDA. Ignored if the\n               testing set contains time series of unequal lengths.\n        \"\"\"\n        features = self.encode(X, batch_size=batch_size)\n        return self.classifier.score(features, y)\n\n\nclass CausalCNNEncoderClassifier(TimeSeriesEncoderClassifier):\n    \"\"\"\n    Wraps a causal CNN encoder of time series as a PyTorch module and a\n    SVM classifier on top of its computed representations in a scikit-learn\n    class.\n\n    @param compared_length Maximum length of randomly chosen time series. If\n           None, this parameter is ignored.\n    @param nb_random_samples Number of randomly chosen intervals to select the\n           final negative sample in the loss.\n    @param negative_penalty Multiplicative coefficient for the negative sample\n           loss.\n    @param batch_size Batch size used during the training of the encoder.\n    @param nb_steps Number of optimization steps to perform for the training of\n           the encoder.\n    @param lr learning rate of the Adam optimizer used to train the encoder.\n    @param penalty Penalty term for the SVM classifier. If None and if the\n           number of samples is high enough, performs a hyperparameter search\n           to find a suitable constant.\n    @param early_stopping Enables, if not None, early stopping heuristic\n           for the training of the representations, based on the final\n           score. 
Representations are still learned unsupervisedly in this\n           case. If the number of samples per class is no more than 10,\n           disables this heuristic. If not None, accepts an integer\n           representing the patience of the early stopping strategy.\n    @param channels Number of channels manipulated in the causal CNN.\n    @param depth Depth of the causal CNN.\n    @param reduced_size Fixed length to which the output time series of the\n           causal CNN is reduced.\n    @param out_channels Number of features in the final output.\n    @param kernel_size Kernel size of the applied non-residual convolutions.\n    @param in_channels Number of input channels of the time series.\n    @param cuda Transfers, if True, all computations to the GPU.\n    @param gpu GPU index to use, if CUDA is enabled.\n    \"\"\"\n    # nb_steps=2000\n    def __init__(self, compared_length=50, nb_random_samples=10,\n                 negative_penalty=1, batch_size=1, nb_steps=2000, lr=0.001,\n                 penalty=1, early_stopping=None, channels=10, depth=1,\n                 reduced_size=10, out_channels=10, kernel_size=4,\n                 in_channels=1, cuda=False, gpu=0):\n        super(CausalCNNEncoderClassifier, self).__init__(\n            compared_length, nb_random_samples, negative_penalty, batch_size,\n            nb_steps, lr, penalty, early_stopping,\n            self.__create_encoder(in_channels, channels, depth, reduced_size,\n                                  out_channels, kernel_size, cuda, gpu),\n            self.__encoder_params(in_channels, channels, depth, reduced_size,\n                                  out_channels, kernel_size),\n            in_channels, out_channels, cuda, gpu\n        )\n        self.architecture = 'CausalCNN'\n        self.channels = channels\n        self.depth = depth\n        self.reduced_size = reduced_size\n        self.kernel_size = kernel_size\n\n    def __create_encoder(self, in_channels, channels, depth, 
reduced_size,\n                         out_channels, kernel_size, cuda, gpu):\n        encoder = networks.causal_cnn.CausalCNNEncoder(\n            in_channels, channels, depth, reduced_size, out_channels,\n            kernel_size\n        )\n        encoder.double()\n        if cuda:\n            encoder.cuda(gpu)\n        return encoder\n\n    def __encoder_params(self, in_channels, channels, depth, reduced_size,\n                         out_channels, kernel_size):\n        return {\n            'in_channels': in_channels,\n            'channels': channels,\n            'depth': depth,\n            'reduced_size': reduced_size,\n            'out_channels': out_channels,\n            'kernel_size': kernel_size\n        }\n\n    def encode_sequence(self, X, batch_size=50):\n        \"\"\"\n        Outputs the representations associated to the input by the encoder,\n        from the start of the time series to each time step (i.e., the\n        evolution of the representations of the input time series with\n        repect to time steps).\n\n        Takes advantage of the causal CNN (before the max pooling), wich\n        ensures that its output at time step i only depends on time step i and\n        previous time steps.\n\n        @param X Testing set.\n        @param batch_size Size of batches used for splitting the test data to\n               avoid out of memory errors when using CUDA. 
Ignored if the\n               testing set contains time series of unequal lengths.\n        \"\"\"\n        # Check if the given time series have unequal lengths\n        varying = bool(numpy.isnan(numpy.sum(X)))\n\n        test = utils.Dataset(X)\n        test_generator = torch.utils.data.DataLoader(\n            test, batch_size=batch_size if not varying else 1\n        )\n        length = numpy.shape(X)[2]\n        features = numpy.full(\n            (numpy.shape(X)[0], self.out_channels, length), numpy.nan\n        )\n        self.encoder = self.encoder.eval()\n\n        causal_cnn = self.encoder.network[0]\n        linear = self.encoder.network[3]\n\n        count = 0\n        with torch.no_grad():\n            if not varying:\n                for batch in test_generator:\n                    if self.cuda:\n                        batch = batch.cuda(self.gpu)\n                    # First applies the causal CNN\n                    output_causal_cnn = causal_cnn(batch)\n                    after_pool = torch.empty(\n                        output_causal_cnn.size(), dtype=torch.double\n                    )\n                    if self.cuda:\n                        after_pool = after_pool.cuda(self.gpu)\n                    after_pool[:, :, 0] = output_causal_cnn[:, :, 0]\n                    # Then for each time step, computes the output of the max\n                    # pooling layer\n                    for i in range(1, length):\n                        after_pool[:, :, i] = torch.max(\n                            torch.cat([\n                                after_pool[:, :, i - 1: i],\n                                output_causal_cnn[:, :, i: i+1]\n                            ], dim=2),\n                            dim=2\n                        )[0]\n                    features[\n                        count * batch_size: (count + 1) * batch_size, :, :\n                    ] = torch.transpose(linear(\n                        
torch.transpose(after_pool, 1, 2)\n                    ), 1, 2)\n                    count += 1\n            else:\n                for batch in test_generator:\n                    if self.cuda:\n                        batch = batch.cuda(self.gpu)\n                    length = batch.size(2) - torch.sum(\n                        torch.isnan(batch[0, 0])\n                    ).data.cpu().numpy()\n                    output_causal_cnn = causal_cnn(batch)\n                    after_pool = torch.empty(\n                        output_causal_cnn.size(), dtype=torch.double\n                    )\n                    if self.cuda:\n                        after_pool = after_pool.cuda(self.gpu)\n                    after_pool[:, :, 0] = output_causal_cnn[:, :, 0]\n                    for i in range(1, length):\n                        after_pool[:, :, i] = torch.max(\n                            torch.cat([\n                                after_pool[:, :, i - 1: i],\n                                output_causal_cnn[:, :, i: i+1]\n                            ], dim=2),\n                            dim=2\n                        )[0]\n                    features[\n                        count: count + 1, :, :\n                    ] = torch.transpose(linear(\n                        torch.transpose(after_pool, 1, 2)\n                    ), 1, 2)\n                    count += 1\n\n        self.encoder = self.encoder.train()\n        return features\n\n    def get_params(self, deep=True):\n        return {\n            'compared_length': self.loss.compared_length,\n            'nb_random_samples': self.loss.nb_random_samples,\n            'negative_penalty': self.loss.negative_penalty,\n            'batch_size': self.batch_size,\n            'nb_steps': self.nb_steps,\n            'lr': self.lr,\n            'penalty': self.penalty,\n            'early_stopping': self.early_stopping,\n            'channels': self.channels,\n            'depth': self.depth,\n            
'reduced_size': self.reduced_size,\n            'kernel_size': self.kernel_size,\n            'in_channels': self.in_channels,\n            'out_channels': self.out_channels,\n            'cuda': self.cuda,\n            'gpu': self.gpu\n        }\n\n    def set_params(self, compared_length, nb_random_samples, negative_penalty,\n                   batch_size, nb_steps, lr, penalty, early_stopping,\n                   channels, depth, reduced_size, out_channels, kernel_size,\n                   in_channels, cuda, gpu):\n        self.__init__(\n            compared_length, nb_random_samples, negative_penalty, batch_size,\n            nb_steps, lr, penalty, early_stopping, channels, depth,\n            reduced_size, out_channels, kernel_size, in_channels, cuda, gpu\n        )\n        return self\n\n\nclass LSTMEncoderClassifier(TimeSeriesEncoderClassifier):\n    \"\"\"\n    Wraps an LSTM encoder of time series as a PyTorch module and a SVM\n    classifier on top of its computed representations in a scikit-learn\n    class.\n\n    @param compared_length Maximum length of randomly chosen time series. If\n           None, this parameter is ignored.\n    @param nb_random_samples Number of randomly chosen intervals to select the\n           final negative sample in the loss.\n    @param negative_penalty Multiplicative coefficient for the negative sample\n           loss.\n    @param batch_size Batch size used during the training of the encoder.\n    @param nb_steps Number of optimization steps to perform for the training of\n           the encoder.\n    @param lr learning rate of the Adam optimizer used to train the encoder.\n    @param penalty Penalty term for the SVM classifier. 
If None and if the\n           number of samples is high enough, performs a hyperparameter search\n           to find a suitable constant.\n    @param early_stopping Enables, if not None, early stopping heuristic\n           for the training of the representations, based on the final\n           score. Representations are still learned unsupervisedly in this\n           case. If the number of samples per class is no more than 10,\n           disables this heuristic. If not None, accepts an integer\n           representing the patience of the early stopping strategy.\n    @param cuda Transfers, if True, all computations to the GPU.\n    @param in_channels Number of input channels of the time series.\n    @param gpu GPU index to use, if CUDA is enabled.\n    \"\"\"\n    def __init__(self, compared_length=50, nb_random_samples=10,\n                 negative_penalty=1, batch_size=1, nb_steps=2000, lr=0.001,\n                 penalty=1, early_stopping=None, in_channels=1, cuda=False,\n                 gpu=0):\n        super(LSTMEncoderClassifier, self).__init__(\n            compared_length, nb_random_samples, negative_penalty, batch_size,\n            nb_steps, lr, penalty, early_stopping,\n            self.__create_encoder(cuda, gpu), {}, in_channels, 160, cuda, gpu\n        )\n        assert in_channels == 1\n        self.architecture = 'LSTM'\n\n    def __create_encoder(self, cuda, gpu):\n        encoder = networks.lstm.LSTMEncoder()\n        encoder.double()\n        if cuda:\n            encoder.cuda(gpu)\n        return encoder\n\n    def get_params(self, deep=True):\n        return {\n            'compared_length': self.loss.compared_length,\n            'nb_random_samples': self.loss.nb_random_samples,\n            'negative_penalty': self.loss.negative_penalty,\n            'batch_size': self.batch_size,\n            'nb_steps': self.nb_steps,\n            'lr': self.lr,\n            'penalty': self.penalty,\n            'early_stopping': 
self.early_stopping,\n            'in_channels': self.in_channels,\n            'cuda': self.cuda,\n            'gpu': self.gpu\n        }\n\n    def set_params(self, compared_length, nb_random_samples, negative_penalty,\n                   batch_size, nb_steps, lr, penalty, early_stopping,\n                   in_channels, cuda, gpu):\n        self.__init__(\n            compared_length, nb_random_samples, negative_penalty, batch_size,\n            nb_steps, lr, penalty, early_stopping, in_channels, cuda, gpu\n        )\n        return self\n"
  },
  {
    "path": "ts_classification_methods/tloss_cls/scripts/ucr.sh",
    "content": "python ucr.py --dataset AllGestureWiimoteY --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset UWaveGestureLibraryX --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset DiatomSizeReduction --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset FreezerSmallTrain --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset ScreenType --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset MixedShapesSmallTrain --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset SonyAIBORobotSurface2 --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset LargeKitchenAppliances --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset ProximalPhalanxOutlineCorrect --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset OSULeaf --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset OliveOil --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset FreezerRegularTrain --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset Herring --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset GesturePebbleZ1 --path /dev_data/zzj/hzy/datasets/UCR --hyper 
./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset MelbournePedestrian --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset PhalangesOutlinesCorrect --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset CricketZ --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset ACSF1 --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset FaceFour --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset SemgHandGenderCh2 --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset Haptics --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset UWaveGestureLibraryY --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset Coffee --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset TwoLeadECG --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset DistalPhalanxOutlineAgeGroup --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset MixedShapesRegularTrain --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset SmoothSubspace --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset Meat --path /dev_data/zzj/hzy/datasets/UCR --hyper 
./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset ShapesAll --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset InsectEPGSmallTrain --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset CinCECGTorso --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset BeetleFly --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset Ham --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset ProximalPhalanxTW --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset ItalyPowerDemand --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset GunPointMaleVersusFemale --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset SonyAIBORobotSurface1 --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset MedicalImages --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset SmallKitchenAppliances --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset PigCVP --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset Crop --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset Chinatown --path /dev_data/zzj/hzy/datasets/UCR --hyper 
./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset PLAID --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset RefrigerationDevices --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset Wine --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset Yoga --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset AllGestureWiimoteX --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset DistalPhalanxTW --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset Computers --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset ElectricDevices --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset Adiac --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset InlineSkate --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset FacesUCR --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset ShapeletSim --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset GunPointAgeSpan --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset Phoneme --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 
42\npython ucr.py --dataset CricketX --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset Lightning2 --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset Beef --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset PowerCons --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset Plane --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset NonInvasiveFetalECGThorax2 --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset UMD --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset Wafer --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset ToeSegmentation1 --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset Car --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset UWaveGestureLibraryZ --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset EOGVerticalSignal --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset CBF --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset EOGHorizontalSignal --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset Strawberry --path 
/dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset StarLightCurves --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset DodgerLoopGame --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset FordA --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset Fish --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset PigArtPressure --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset ShakeGestureWiimoteZ --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset ECGFiveDays --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset GunPointOldVersusYoung --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset GesturePebbleZ2 --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset ECG200 --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset Symbols --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset FordB --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset FaceAll --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset MiddlePhalanxTW --path /dev_data/zzj/hzy/datasets/UCR --hyper 
./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset MiddlePhalanxOutlineCorrect --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset GestureMidAirD1 --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset InsectEPGRegularTrain --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset DodgerLoopDay --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset ProximalPhalanxOutlineAgeGroup --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset HandOutlines --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset SwedishLeaf --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset AllGestureWiimoteZ --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset InsectWingbeatSound --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset MiddlePhalanxOutlineAgeGroup --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset GestureMidAirD3 --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset ChlorineConcentration --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset ArrowHead --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset Fungi --path 
/dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset PigAirwayPressure --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset PickupGestureWiimoteZ --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset Rock --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset Worms --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset Lightning7 --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset BME --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset SyntheticControl --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset MoteStrain --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset SemgHandMovementCh2 --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset Mallat --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset GestureMidAirD2 --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset CricketY --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset NonInvasiveFetalECGThorax1 --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset ToeSegmentation2 --path /dev_data/zzj/hzy/datasets/UCR 
--hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset ECG5000 --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset Trace --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset WormsTwoClass --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset GunPoint --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset UWaveGestureLibraryAll --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset EthanolLevel --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset WordSynonyms --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset HouseTwenty --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset DodgerLoopWeekend --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset Earthquakes --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset TwoPatterns --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset DistalPhalanxOutlineCorrect --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset SemgHandSubjectCh2 --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset FiftyWords --path /dev_data/zzj/hzy/datasets/UCR --hyper 
./default_hyperparameters.json  --cuda --random_seed 42\npython ucr.py --dataset BirdChicken --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json  --cuda --random_seed 42\n"
  },
  {
    "path": "ts_classification_methods/tloss_cls/scripts/uea.sh",
    "content": "python uea.py --dataset CharacterTrajectories --path /dev_data/zzj/hzy/datasets/UEA --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython uea.py --dataset PenDigits --path /dev_data/zzj/hzy/datasets/UEA --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython uea.py --dataset InsectWingbeat --path /dev_data/zzj/hzy/datasets/UEA --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython uea.py --dataset ArticularyWordRecognition --path /dev_data/zzj/hzy/datasets/UEA --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython uea.py --dataset Heartbeat --path /dev_data/zzj/hzy/datasets/UEA --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython uea.py --dataset Handwriting --path /dev_data/zzj/hzy/datasets/UEA --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython uea.py --dataset AtrialFibrillation --path /dev_data/zzj/hzy/datasets/UEA --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython uea.py --dataset PhonemeSpectra --path /dev_data/zzj/hzy/datasets/UEA --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython uea.py --dataset BasicMotions --path /dev_data/zzj/hzy/datasets/UEA --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython uea.py --dataset SelfRegulationSCP1 --path /dev_data/zzj/hzy/datasets/UEA --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython uea.py --dataset JapaneseVowels --path /dev_data/zzj/hzy/datasets/UEA --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython uea.py --dataset UWaveGestureLibrary --path /dev_data/zzj/hzy/datasets/UEA --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython uea.py --dataset Libras --path /dev_data/zzj/hzy/datasets/UEA --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython uea.py --dataset EthanolConcentration --path /dev_data/zzj/hzy/datasets/UEA --hyper ./default_hyperparameters.json  --cuda 
--random_seed 42\npython uea.py --dataset RacketSports --path /dev_data/zzj/hzy/datasets/UEA --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython uea.py --dataset NATOPS --path /dev_data/zzj/hzy/datasets/UEA --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython uea.py --dataset StandWalkJump --path /dev_data/zzj/hzy/datasets/UEA --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython uea.py --dataset ERing --path /dev_data/zzj/hzy/datasets/UEA --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython uea.py --dataset HandMovementDirection --path /dev_data/zzj/hzy/datasets/UEA --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython uea.py --dataset SelfRegulationSCP2 --path /dev_data/zzj/hzy/datasets/UEA --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython uea.py --dataset Epilepsy --path /dev_data/zzj/hzy/datasets/UEA --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython uea.py --dataset FaceDetection --path /dev_data/zzj/hzy/datasets/UEA --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython uea.py --dataset PEMS-SF --path /dev_data/zzj/hzy/datasets/UEA --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython uea.py --dataset Cricket --path /dev_data/zzj/hzy/datasets/UEA --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython uea.py --dataset SpokenArabicDigits --path /dev_data/zzj/hzy/datasets/UEA --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython uea.py --dataset DuckDuckGeese --path /dev_data/zzj/hzy/datasets/UEA --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython uea.py --dataset FingerMovements --path /dev_data/zzj/hzy/datasets/UEA --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython uea.py --dataset EigenWorms --path /dev_data/zzj/hzy/datasets/UEA --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython uea.py --dataset 
MotorImagery --path /dev_data/zzj/hzy/datasets/UEA --hyper ./default_hyperparameters.json  --cuda --random_seed 42\npython uea.py --dataset LSST --path /dev_data/zzj/hzy/datasets/UEA --hyper ./default_hyperparameters.json  --cuda --random_seed 42\n"
  },
  {
    "path": "ts_classification_methods/tloss_cls/transfer_ucr.py",
    "content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n\n#   http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\nfrom copyreg import pickle\nimport os\nimport json\nimport torch\nimport argparse\nimport pickle\nimport ucr\nimport scikit_wrappers\n\n\ndef parse_arguments():\n    parser = argparse.ArgumentParser(\n        description='Uses the learned representations for a dataset to ' +\n                    'learn classifiers for all other UCR datasets'\n    )\n    parser.add_argument('--path', type=str, metavar='PATH', required=True,\n                        help='path where the UCR datasets are located')\n    parser.add_argument('--save_path', type=str, metavar='PATH', required=True,\n                        help='path where the encoder is saved')\n    parser.add_argument('--dataset', type=str, metavar='D', required=True,\n                        help='dataset name')\n    parser.add_argument('--cuda', action='store_true',\n                        help='activate to use CUDA')\n    parser.add_argument('--gpu', type=int, default=0, metavar='GPU',\n                        help='index of GPU used for computations (default: 0)')\n    return parser.parse_args()\n\n\nif __name__ == '__main__':\n    args = parse_arguments()\n    if args.cuda and not 
torch.cuda.is_available():\n        print(\"CUDA is not available, proceeding without it...\")\n        args.cuda = False\n\n    classifier = scikit_wrappers.CausalCNNEncoderClassifier()\n\n    hf = open(\n        os.path.join(args.save_path, args.dataset + '_hyperparameters.json'),\n        'r'\n    )\n    hp_dict = json.load(hf)\n    hf.close()\n    hp_dict['cuda'] = args.cuda\n    hp_dict['gpu'] = args.gpu\n    classifier.set_params(**hp_dict)\n    classifier.load(os.path.join(args.save_path, args.dataset))\n\n    print(\"Classification tasks...\")\n\n    results = {}\n    # List of folders / datasets in the given path\n    datasets = [x[0][len(args.path) + 1:] for x in os.walk(args.path)][1:]\n    for dataset in datasets:\n        train, train_labels, test, test_labels = ucr.load_UCR_dataset(\n            args.path, dataset\n        )\n        \n        classifier.fit_classifier(classifier.encode(train), train_labels)\n        print(\n            dataset,\n            \"Test accuracy: \" + str(classifier.score(test, test_labels))\n        )\n        results[dataset] = classifier.score(test, test_labels)\n\n    with open('/dev_data/zzj/hzy/pretrained_model/Tloss/results/accu/CricketX_result.pkl', 'ab') as f:\n        pickle.dump(results, f)\n            \n"
  },
  {
    "path": "ts_classification_methods/tloss_cls/ucr.py",
    "content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n\n#   http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\nfrom tsm_utils import set_seed\nfrom data.preprocessing import *\nimport os\nimport json\nimport math\nimport torch\nimport numpy\nimport pandas\nimport argparse\nimport pickle\nimport scikit_wrappers\nimport sklearn\n\nimport sys\nsys.path.append('..')\n\n\ndef load_UCR_dataset(path, dataset):\n    \"\"\"\n    Loads the UCR dataset given in input in numpy arrays.\n\n    @param path Path where the UCR dataset is located.\n    @param dataset Name of the UCR dataset.\n\n    @return Quadruplet containing the training set, the corresponding training\n            labels, the testing set and the corresponding testing labels.\n    \"\"\"\n    train_file = os.path.join(path, dataset, dataset + \"_TRAIN.tsv\")\n    test_file = os.path.join(path, dataset, dataset + \"_TEST.tsv\")\n    train_df = pandas.read_csv(train_file, sep='\\t', header=None)\n    test_df = pandas.read_csv(test_file, sep='\\t', header=None)\n    train_array = numpy.array(train_df)\n    test_array = numpy.array(test_df)\n\n    # Move the labels to {0, ..., L-1}\n    labels = numpy.unique(train_array[:, 0])\n    transform = {}\n    for i, l in enumerate(labels):\n        transform[l] = i\n\n    
train = numpy.expand_dims(train_array[:, 1:], 1).astype(numpy.float64)\n    train_labels = numpy.vectorize(transform.get)(train_array[:, 0])\n    test = numpy.expand_dims(test_array[:, 1:], 1).astype(numpy.float64)\n    test_labels = numpy.vectorize(transform.get)(test_array[:, 0])\n\n    # Normalization for non-normalized datasets\n    # To keep the amplitude information, we do not normalize values over\n    # individual time series, but on the whole dataset\n    if dataset not in [\n        'AllGestureWiimoteX',\n        'AllGestureWiimoteY',\n        'AllGestureWiimoteZ',\n        'BME',\n        'Chinatown',\n        'Crop',\n        'EOGHorizontalSignal',\n        'EOGVerticalSignal',\n        'Fungi',\n        'GestureMidAirD1',\n        'GestureMidAirD2',\n        'GestureMidAirD3',\n        'GesturePebbleZ1',\n        'GesturePebbleZ2',\n        'GunPointAgeSpan',\n        'GunPointMaleVersusFemale',\n        'GunPointOldVersusYoung',\n        'HouseTwenty',\n        'InsectEPGRegularTrain',\n        'InsectEPGSmallTrain',\n        'MelbournePedestrian',\n        'PickupGestureWiimoteZ',\n        'PigAirwayPressure',\n        'PigArtPressure',\n        'PigCVP',\n        'PLAID',\n        'PowerCons',\n        'Rock',\n        'SemgHandGenderCh2',\n        'SemgHandMovementCh2',\n        'SemgHandSubjectCh2',\n        'ShakeGestureWiimoteZ',\n        'SmoothSubspace',\n        'UMD'\n    ]:\n        return train, train_labels, test, test_labels\n    # Post-publication note:\n    # Using the testing set to normalize might bias the learned network,\n    # but with a limited impact on the reported results on few datasets.\n    # See the related discussion here: https://github.com/White-Link/UnsupervisedScalableRepresentationLearningTimeSeries/pull/13.\n    mean = numpy.nanmean(numpy.concatenate([train, test]))\n    var = numpy.nanvar(numpy.concatenate([train, test]))\n    train = (train - mean) / math.sqrt(var)\n    test = (test - mean) / math.sqrt(var)\n    
return train, train_labels, test, test_labels\n\n\ndef fit_hyperparameters(file, train, train_labels, cuda, gpu,\n                        save_memory=False):\n    \"\"\"\n    Creates a classifier from the given set of hyperparameters in the input\n    file, fits it and return it.\n\n    @param file Path of a file containing a set of hyperparemeters.\n    @param train Training set.\n    @param train_labels Labels for the training set.\n    @param cuda If True, enables computations on the GPU.\n    @param gpu GPU to use if CUDA is enabled.\n    @param save_memory If True, save GPU memory by propagating gradients after\n           each loss term, instead of doing it after computing the whole loss.\n    \"\"\"\n    classifier = scikit_wrappers.CausalCNNEncoderClassifier()\n\n    # Loads a given set of hyperparameters and fits a model with those\n    hf = open(os.path.join(file), 'r')\n    params = json.load(hf)\n    hf.close()\n    # Check the number of input channels\n    params['in_channels'] = numpy.shape(train)[1]\n    params['cuda'] = cuda\n    params['gpu'] = gpu\n    classifier.set_params(**params)\n\n    return classifier.fit(\n        train, train_labels, save_memory=save_memory, verbose=True\n    )\n\n\ndef parse_arguments():\n    parser = argparse.ArgumentParser(\n        description='Classification tests for UCR repository datasets'\n    )\n    parser.add_argument('--dataset', type=str, metavar='D', required=True,\n                        help='dataset name')\n    parser.add_argument('--path', type=str, metavar='PATH', required=True,\n                        help='path where the dataset is located')\n    parser.add_argument('--save_path', type=str, metavar='PATH',\n                        help='path where the estimator is/should be saved')\n    parser.add_argument('--cuda', action='store_true',\n                        help='activate to use CUDA')\n    parser.add_argument('--gpu', type=int, default=0, metavar='GPU',\n                        help='index of 
GPU used for computations (default: 0)')\n    parser.add_argument('--hyper', type=str, metavar='FILE', required=True,\n                        help='path of the file of hyperparameters to use; ' +\n                             'for training; must be a JSON file')\n    parser.add_argument('--load', action='store_true', default=False,\n                        help='activate to load the estimator instead of ' +\n                             'training it')\n    parser.add_argument('--fit_classifier', action='store_true', default=False,\n                        help='if not supervised, activate to load the ' +\n                             'model and retrain the classifier')\n    parser.add_argument('--random_seed', type=int, default=42)\n\n    return parser.parse_args()\n\n\nif __name__ == '__main__':\n    args = parse_arguments()\n\n    if args.cuda and not torch.cuda.is_available():\n        print(\"CUDA is not available, proceeding without it...\")\n        args.cuda = False\n    '''\n    train, train_labels, test, test_labels = load_UCR_dataset(\n        args.path, args.dataset\n    )\n    '''\n   # set seed\n    set_seed(args)\n\n    sum_dataset, sum_target, num_classes = load_data(args.path, args.dataset)\n    '''\n    sum_dataset = normalize_per_series(sum_dataset)\n    sum_dataset = numpy.expand_dims(sum_dataset, 1).astype(numpy.float64)\n    '''\n    train_datasets, train_targets, val_datasets, val_targets, test_datasets, test_targets = k_fold(\n        sum_dataset, sum_target)\n    accuracies = []\n    for i, train_dataset in enumerate(train_datasets):\n        print('{} fold start training!'.format(i+1))\n        train_target = train_targets[i]\n\n        val_target = val_targets[i]\n        val_dataset = val_datasets[i]\n\n        test_dataset = test_datasets[i]\n        test_target = test_targets[i]\n\n        train_dataset, _, test_dataset = fill_nan_value(\n            train_dataset, val_dataset, test_dataset)\n        train_dataset, test_dataset = 
normalize_per_series(\n            train_dataset), normalize_per_series(test_dataset)\n\n        train_dataset = numpy.concatenate((train_dataset, val_dataset))\n        train_target = numpy.concatenate((train_target, val_target))\n\n        train_dataset,  test_dataset = numpy.expand_dims(train_dataset, 1).astype(\n            numpy.float64), numpy.expand_dims(test_dataset, 1).astype(numpy.float64)\n        if not args.load and not args.fit_classifier:\n\n            classifier = fit_hyperparameters(\n                args.hyper, train_dataset, train_target, args.cuda, args.gpu\n            )\n        else:\n            classifier = scikit_wrappers.CausalCNNEncoderClassifier()\n            hf = open(\n                os.path.join(\n                    args.save_path, args.dataset + '_hyperparameters.json'\n                ), 'r'\n            )\n            hp_dict = json.load(hf)\n            hf.close()\n            hp_dict['cuda'] = args.cuda\n            hp_dict['gpu'] = args.gpu\n            classifier.set_params(**hp_dict)\n            classifier.load(os.path.join(args.save_path, args.dataset))\n\n        if not args.load:\n            if args.fit_classifier:\n                classifier.fit_classifier(\n                    classifier.encode(train_dataset), train_target)\n            '''\n            classifier.save(\n                os.path.join(args.save_path, args.dataset)\n            )\n            with open(\n                os.path.join(\n                    args.save_path, args.dataset + '_hyperparameters.json'\n                ), 'w'\n            ) as fp:\n                json.dump(classifier.get_params(), fp)\n            '''\n\n        print(\"Test accuracy: \" + str(classifier.score(test_dataset, test_target)))\n        accuracies.append(classifier.score(test_dataset, test_target))\n    accuracies = numpy.array(accuracies)\n\n    if os.path.exists('./tloss_result.csv'):\n        result_form = pd.read_csv('./tloss_result.csv')\n    else:\n        
result_form = pd.DataFrame(columns=['target', 'accuracy', 'std'])\n\n    result_form = result_form.append({'target': args.dataset, 'accuracy': '%.4f' % numpy.mean(\n        accuracies), 'std': '%.4f' % numpy.std(accuracies)}, ignore_index=True)\n    result_form = result_form.iloc[:, -3:]\n    result_form.to_csv('./tloss_result.csv')\n"
  },
  {
    "path": "ts_classification_methods/tloss_cls/uea.py",
    "content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n\n#   http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom tsm_utils import set_seed\nfrom data import load_UEA, UEADataset, k_fold, fill_nan_value, normalize_per_series\nimport pandas as pd\nimport os\nimport json\nimport math\nimport torch\nimport numpy\nimport argparse\n'''\nimport weka.core.jvm\nimport weka.core.converters\n'''\nimport time\nimport scikit_wrappers\nimport sys\nsys.path.append('..')\nsys.path.remove('..')\n\n\ndef fit_hyperparameters(file, train, train_labels, cuda, gpu,\n                        save_memory=False):\n    \"\"\"\n    Creates a classifier from the given set of hyperparameters in the input\n    file, fits it and return it.\n\n    @param file Path of a file containing a set of hyperparemeters.\n    @param train Training set.\n    @param train_labels Labels for the training set.\n    @param cuda If True, enables computations on the GPU.\n    @param gpu GPU to use if CUDA is enabled.\n    @param save_memory If True, save GPU memory by propagating gradients after\n           each loss term, instead of doing it after computing the whole loss.\n    \"\"\"\n    classifier = scikit_wrappers.CausalCNNEncoderClassifier()\n\n    # Loads a given set of hyperparameters and fits a model with 
those\n    hf = open(os.path.join(file), 'r')\n    params = json.load(hf)\n    hf.close()\n    # Check the number of input channels\n    params['in_channels'] = numpy.shape(train)[1]\n    params['cuda'] = cuda\n    params['gpu'] = gpu\n    classifier.set_params(**params)\n    return classifier.fit(\n        train, train_labels, save_memory=save_memory, verbose=True\n    )\n\n\ndef parse_arguments():\n    parser = argparse.ArgumentParser(\n        description='Classification tests for UEA repository datasets'\n    )\n    parser.add_argument('--dataset', type=str, metavar='D', required=True,\n                        help='dataset name')\n    parser.add_argument('--path', type=str, metavar='PATH', required=True,\n                        help='path where the dataset is located')\n    parser.add_argument('--save_path', type=str, metavar='PATH', required=False,\n                        help='path where the estimator is/should be saved')\n    parser.add_argument('--cuda', action='store_true',\n                        help='activate to use CUDA')\n    parser.add_argument('--gpu', type=int, default=0, metavar='GPU',\n                        help='index of GPU used for computations (default: 0)')\n    parser.add_argument('--hyper', type=str, metavar='FILE', required=True,\n                        help='path of the file of hyperparameters to use ' +\n                             'for training; must be a JSON file')\n    parser.add_argument('--load', action='store_true', default=False,\n                        help='activate to load the estimator instead of ' +\n                             'training it')\n    parser.add_argument('--fit_classifier', action='store_true', default=False,\n                        help='if not supervised, activate to load the ' +\n                             'model and retrain the classifier')\n\n    parser.add_argument('--random_seed', type=int, default=42)\n    return parser.parse_args()\n\n\nif __name__ == '__main__':\n    args = 
parse_arguments()\n    if args.cuda and not torch.cuda.is_available():\n        print(\"CUDA is not available, proceeding without it...\")\n        args.cuda = False\n    set_seed(args)\n    sum_dataset, sum_target, num_classes = load_UEA(args.path, args.dataset)\n\n    train_datasets, train_targets, val_datasets, val_targets, test_datasets, test_targets = k_fold(\n        sum_dataset, sum_target)\n    accuracies = []\n    times = []\n    for i, train_dataset in enumerate(train_datasets):\n        start = time.time()\n        print('{} fold start training!'.format(i+1))\n        train_target = train_targets[i]\n\n        val_target = val_targets[i]\n        val_dataset = val_datasets[i]\n\n        test_dataset = test_datasets[i]\n        test_target = test_targets[i]\n\n        train_dataset, _, test_dataset = fill_nan_value(\n            train_dataset, val_dataset, test_dataset)\n        train_dataset, test_dataset = normalize_per_series(\n            train_dataset), normalize_per_series(test_dataset)\n\n        train_dataset = numpy.concatenate((train_dataset, val_dataset))\n        train_target = numpy.concatenate((train_target, val_target))\n\n        if not args.load and not args.fit_classifier:\n            classifier = fit_hyperparameters(\n                args.hyper, train_dataset, train_target, args.cuda, args.gpu,\n                save_memory=True\n            )\n        else:\n            classifier = scikit_wrappers.CausalCNNEncoderClassifier()\n            hf = open(\n                os.path.join(\n                    args.save_path, args.dataset + '_hyperparameters.json'\n                ), 'r'\n            )\n            hp_dict = json.load(hf)\n            hf.close()\n            hp_dict['cuda'] = args.cuda\n            hp_dict['gpu'] = args.gpu\n            classifier.set_params(**hp_dict)\n            classifier.load(os.path.join(args.save_path, args.dataset))\n\n        if not args.load:\n            if args.fit_classifier:\n                
classifier.fit_classifier(\n                    classifier.encode(train_dataset), train_target)\n\n        accu = classifier.score(test_dataset, test_target)\n        print(\"Test accuracy: \" + str(accu))\n        end = time.time()\n        times.append(end-start)\n        accuracies.append(accu)\n\n    accuracies = numpy.array(accuracies)\n    times = numpy.array(times)\n\n    if os.path.exists('./tloss_uea.csv'):\n        result_form = pd.read_csv('./tloss_uea.csv')\n    else:\n        result_form = pd.DataFrame(\n            columns=['target', 'accuracy', 'std', 'times'])\n\n    result_form = result_form.append({'target': args.dataset, 'accuracy': '%.4f' % numpy.mean(\n        accuracies), 'std': '%.4f' % numpy.std(accuracies), 'times': '%.4f' % numpy.mean(times)}, ignore_index=True)\n    result_form = result_form.iloc[:, -4:]\n    result_form.to_csv('./tloss_uea.csv')\n"
  },
  {
    "path": "ts_classification_methods/tloss_cls/utils.py",
    "content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n\n#   http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\nimport numpy\nimport torch.utils.data\n\n\nclass Dataset(torch.utils.data.Dataset):\n    \"\"\"\n    PyTorch wrapper for a numpy dataset.\n\n    @param dataset Numpy array representing the dataset.\n    \"\"\"\n    def __init__(self, dataset):\n        self.dataset = dataset\n\n    def __len__(self):\n        return numpy.shape(self.dataset)[0]\n\n    def __getitem__(self, index):\n        return self.dataset[index]\n\n\nclass LabelledDataset(torch.utils.data.Dataset):\n    \"\"\"\n    PyTorch wrapper for a numpy dataset and its associated labels.\n\n    @param dataset Numpy array representing the dataset.\n    @param labels One-dimensional array of the same length as dataset with\n           non-negative int values.\n    \"\"\"\n    def __init__(self, dataset, labels):\n        self.dataset = dataset\n        self.labels = labels\n\n    def __len__(self):\n        return numpy.shape(self.dataset)[0]\n\n    def __getitem__(self, index):\n        return self.dataset[index], self.labels[index]\n"
  },
  {
    "path": "ts_classification_methods/train.py",
    "content": "import argparse\nimport os\nimport time\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader\n\nfrom data.dataloader import UCRDataset, UEADataset\nfrom data.preprocessing import normalize_per_series, fill_nan_value, normalize_train_val_test, load_UEA, normalize_uea_set\nfrom tsm_utils import build_model, set_seed, build_dataset, build_loss, evaluate, get_all_datasets, save_cls_result\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    # Base setup\n    parser.add_argument('--backbone', type=str, default='fcn', help='encoder backbone, fcn or dilated')\n    parser.add_argument('--task', type=str, default='classification', help='classification or reconstruction')\n    parser.add_argument('--random_seed', type=int, default=42, help='shuffle seed')\n\n    # Dataset setup\n    parser.add_argument('--dataset', type=str, default=None, help='dataset (in ucr or uea)')\n    parser.add_argument('--is_uea', type=bool, default=False, help='True or False')\n    parser.add_argument('--dataroot', type=str, default=None, help='path of UCR/UEA folder')\n    parser.add_argument('--num_classes', type=int, default=0, help='number of class')\n    parser.add_argument('--normalize_way', type=str, default='single', help='single or train_set')\n    parser.add_argument('--seq_len', type=int, default=46, help='seq_len')\n    parser.add_argument('--input_size', type=int, default=1, help='input_size')\n\n    # Dilated Convolution setup\n    parser.add_argument('--depth', type=int, default=3, help='depth of the dilated conv model')\n    parser.add_argument('--in_channels', type=int, default=1, help='input data channel')\n    parser.add_argument('--embedding_channels', type=int, default=40, help='mid layer channel')\n    parser.add_argument('--reduced_size', type=int, default=160, help='number of channels after Global max Pool')\n    parser.add_argument('--out_channels', type=int, default=320, help='number of channels after linear 
layer')\n    parser.add_argument('--kernel_size', type=int, default=3, help='convolution kernel size')\n\n    # training setup\n    parser.add_argument('--loss', type=str, default='cross_entropy', help='loss function')\n    parser.add_argument('--optimizer', type=str, default='adam', help='optimizer')\n    parser.add_argument('--lr', type=float, default=0.001, help='learning rate')\n    parser.add_argument('--weight_decay', type=float, default=0.0, help='weight decay')\n    parser.add_argument('--batch_size', type=int, default=128, help='(16, 128) larger batch size on the big dataset, ')\n    parser.add_argument('--epoch', type=int, default=1000, help='training epoch')\n    parser.add_argument('--mode', type=str, default='pretrain', help='train mode, default pretrain')\n    parser.add_argument('--save_dir', type=str, default='/SSD/lz/time_tsm/result_tsm_lin')\n    parser.add_argument('--save_csv_name', type=str, default='ex1_test_fcncls_0530_')\n    parser.add_argument('--continue_training', type=int, default=0, help='continue training')\n    parser.add_argument('--cuda', type=str, default='cuda:1')\n\n    # Decoder setup\n    parser.add_argument('--decoder_backbone', type=str, default='rnn', help='backbone of the decoder (rnn or fcn)')\n\n    # classifier setup\n    parser.add_argument('--classifier', type=str, default='linear', help='type of classifier')\n    parser.add_argument('--classifier_input', type=int, default=128, help='input dim of the classifiers')\n    parser.add_argument('--classifier_embedding', type=int, default=128,\n                        help='embedding dim of the non linear classifier')\n\n    # fintune setup\n    parser.add_argument('--source_dataset', type=str, default=None, help='source dataset of the pretrained model')\n    parser.add_argument('--transfer_strategy', type=str, default='classification', help='classification or reconstruction')\n    # parser.add_argument('--direct_train')\n\n    args = parser.parse_args()\n\n    device = 
torch.device(args.cuda if torch.cuda.is_available() else \"cpu\")\n    set_seed(args)\n    if args.is_uea:\n        sum_dataset, sum_target, num_classes = load_UEA(args.dataroot, args.dataset)\n        args.input_size = sum_dataset.shape[2]\n        args.in_channels = sum_dataset.shape[2]\n    else:\n        sum_dataset, sum_target, num_classes = build_dataset(args)\n    args.num_classes = num_classes\n    args.seq_len = sum_dataset.shape[1]\n    # print(\"test: sum_dataset.shape = \", sum_dataset.shape)\n    if sum_dataset.shape[0] * 0.6 < args.batch_size:\n        args.batch_size = 16\n\n    model, classifier = build_model(args)\n    model, classifier = model.to(device), classifier.to(device)\n    loss = build_loss(args).to(device)\n    model_init_state = model.state_dict()\n    classifier_init_state = classifier.state_dict()\n\n    if args.optimizer == 'adam':\n        optimizer = torch.optim.Adam([{'params': model.parameters()}, {'params': classifier.parameters()}],\n                                     lr=args.lr, weight_decay=args.weight_decay)\n    elif args.optimizer == 'sgd':\n        optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)\n\n    if args.mode == 'pretrain' and args.task == 'classification':\n        if not os.path.exists(args.save_dir):\n            os.mkdir(args.save_dir)\n\n        if not os.path.exists(os.path.join(args.save_dir, args.dataset)):\n            os.mkdir(os.path.join(args.save_dir, args.dataset))\n\n        if args.continue_training != 0:\n            model.load_state_dict(torch.load(os.path.join(args.save_dir, args.dataset, 'pretrain_weights.pt')))\n            classifier.load_state_dict(torch.load(os.path.join(args.save_dir, args.dataset, 'classifier_weights.pt')))\n\n        print('{} started pretrain'.format(args.dataset))\n\n        if args.normalize_way == 'single':\n            # TODO normalize per series\n            sum_dataset = normalize_per_series(sum_dataset)\n        else:\n 
           sum_dataset, _, _ = normalize_train_val_test(sum_dataset, sum_dataset,\n                                                         sum_dataset)\n\n        train_set = UCRDataset(torch.from_numpy(sum_dataset).to(device),\n                               torch.from_numpy(sum_target).to(device).to(torch.int64))\n        train_loader = DataLoader(train_set, batch_size=args.batch_size, num_workers=0)\n\n        last_loss = float('inf')\n        stop_count = 0\n        increase_count = 0\n\n        min_loss = float('inf')\n        min_epoch = 0\n        model_to_save = None\n\n        num_steps = train_set.__len__() // args.batch_size\n        for epoch in range(args.epoch - args.continue_training):\n\n            if stop_count == 50 or increase_count == 50:\n                print(\"model convergent at epoch {}, early stopping.\".format(epoch))\n                break\n\n            model.train()\n            classifier.train()\n            epoch_loss = 0\n            epoch_accu = 0\n            for x, y in train_loader:\n                optimizer.zero_grad()\n                pred = model(x)\n\n                pred = classifier(pred)\n\n                step_loss = loss(pred, y)\n\n                step_loss.backward()\n                optimizer.step()\n\n                epoch_loss += step_loss.item()\n                epoch_accu += torch.sum(torch.argmax(pred.data, axis=1) == y) / len(y)\n\n            epoch_loss /= num_steps\n            if abs(epoch_loss - last_loss) <= 1e-4:\n                stop_count += 1\n            else:\n                stop_count = 0\n\n            if epoch_loss > last_loss:\n                increase_count += 1\n            else:\n                increase_count = 0\n\n            last_loss = epoch_loss\n            if epoch_loss < min_loss:\n                min_loss = epoch_loss\n                min_epoch = epoch\n                model_to_save = model.state_dict()\n                classifier_to_save = classifier.state_dict()\n\n            
epoch_accu /= num_steps\n            if epoch % 100 == 0:\n                print(\"epoch : {}, loss : {}, accuracy : {}\".format(epoch, epoch_loss, epoch_accu))\n                torch.save(model_to_save, os.path.join(args.save_dir, args.dataset, 'pretrain_weights.pt'))\n                torch.save(classifier_to_save, os.path.join(args.save_dir, args.dataset, 'classifier_weights.pt'))\n\n        print('{} finished pretrain, with min loss {} at epoch {}'.format(args.dataset, min_loss, min_epoch))\n        torch.save(model_to_save, os.path.join(args.save_dir, args.dataset, 'pretrain_weights.pt'))\n\n    if args.mode == 'pretrain' and args.task == 'reconstruction':\n        if not os.path.exists(args.save_dir):\n            os.mkdir(args.save_dir)\n\n        if not os.path.exists(os.path.join(args.save_dir, args.dataset)):\n            os.mkdir(os.path.join(args.save_dir, args.dataset))\n        print('start reconstruction on {}'.format(args.dataset))\n\n        if args.normalize_way == 'single':\n            # TODO normalize per series\n            sum_dataset = normalize_per_series(sum_dataset)\n        else:\n            sum_dataset, _, _ = normalize_train_val_test(sum_dataset, sum_dataset,\n                                                         sum_dataset)\n\n        train_set = UCRDataset(torch.from_numpy(sum_dataset).to(device), torch.from_numpy(sum_target))\n        train_loader = DataLoader(train_set, batch_size=args.batch_size, num_workers=0)\n\n        num_steps = train_set.__len__() // args.batch_size\n        last_loss = 0\n        stop_count = 0\n        min_loss = float('inf')\n        increase_count = 0\n        model_to_save = None\n\n        for epoch in range(args.epoch):\n\n            if stop_count == 50 or increase_count == 50:\n                print(\"model convergent at epoch {}, early stopping.\".format(epoch))\n                break\n\n            model.train()\n            classifier.train()\n            epoch_loss = 0\n\n            for i, 
(x, _) in enumerate(train_loader):\n                # x -> (batch_size, sequence length)\n                # x_features -> (batch_size, out_channels)\n                # x_reversed -> (batch_size, sequence length), (xt, xt-1. ..., x1)\n                optimizer.zero_grad()\n                x_features = model(x)\n                if args.decoder_backbone == 'fcn':\n                    out_x = classifier(x_features)\n\n                    step_loss = loss(x, out_x)\n                    epoch_loss = epoch_loss + step_loss.item()\n\n                    step_loss.backward()\n                    optimizer.step()\n\n                else:\n                    x_reversed = torch.fliplr(x)\n\n                    # x_reversed -> (batch_size, sequence length, 1)\n                    time_length = x.shape[1]\n\n                    out = x_reversed[:, :, 0]\n\n                    hidden1 = x_features\n                    hidden2 = x_features\n                    hidden3 = x_features\n\n                    step_loss = 0\n                    for i in range(time_length):\n                        hidden1, hidden2, hidden3, out = classifier(hidden1, hidden2, hidden3, out)\n                        step_loss += loss(out, x_reversed[:, :, i])\n\n                    step_loss /= time_length\n                    epoch_loss = epoch_loss + step_loss.item()\n                    step_loss.backward()\n                    optimizer.step()\n\n            epoch_loss /= num_steps\n\n            if epoch % 100 == 0:\n                print(\"epoch : {}, loss : {}\".format(epoch, epoch_loss))\n\n            if epoch_loss < min_loss:\n                model_to_save = model.state_dict()\n                min_loss = epoch_loss\n            # early stopping judge\n            if abs(epoch_loss - last_loss) < 1e-6:\n                stop_count += 1\n            else:\n                stop_count = 0\n\n            if epoch_loss > last_loss:\n                increase_count += 1\n            else:\n                
increase_count = 0\n\n            last_loss = epoch_loss\n\n        print('{} finished pretrain, with min loss {} '.format(args.dataset, min_loss))\n\n        save_name = args.decoder_backbone + '_reconstruction_' + 'pretrain_weights.pt'\n        torch.save(model_to_save, os.path.join(args.save_dir, args.dataset, save_name))\n\n    if args.mode == 'finetune':\n        print('start finetune on {}'.format(args.dataset))\n\n        train_datasets, train_targets, val_datasets, val_targets, test_datasets, test_targets = get_all_datasets(\n            sum_dataset, sum_target)\n\n        losses = []\n        test_accuracies = []\n        train_time = 0.0\n        end_val_epochs = []\n        for i, train_dataset in enumerate(train_datasets):\n            t = time.time()\n            if args.transfer_strategy == 'classification':\n                model.load_state_dict(\n                    torch.load(os.path.join(args.save_dir, args.source_dataset, 'pretrain_weights.pt')))\n            else:\n                if args.decoder_backbone == 'fcn':\n                    model.load_state_dict(\n                        torch.load(\n                            os.path.join(args.save_dir, args.source_dataset, 'fcn_reconstruction_pretrain_weights.pt')))\n                else:\n                    model.load_state_dict(\n                        torch.load(\n                            os.path.join(args.save_dir, args.source_dataset, 'rnn_reconstruction_pretrain_weights.pt')))\n            classifier.load_state_dict(classifier_init_state)\n            print('{} fold start training and evaluate'.format(i))\n            max_accuracy = 0\n\n            train_target = train_targets[i]\n            val_dataset = val_datasets[i]\n            val_target = val_targets[i]\n\n            test_dataset = test_datasets[i]\n            test_target = test_targets[i]\n\n            train_dataset, val_dataset, test_dataset = fill_nan_value(train_dataset, val_dataset, test_dataset)\n\n            if 
args.normalize_way == 'single':\n                # TODO normalize per series\n                train_dataset = normalize_per_series(train_dataset)\n                val_dataset = normalize_per_series(val_dataset)\n                test_dataset = normalize_per_series(test_dataset)\n            else:\n                train_dataset, val_dataset, test_dataset = normalize_train_val_test(train_dataset, val_dataset,\n                                                                                    test_dataset)\n\n            train_set = UCRDataset(torch.from_numpy(train_dataset).to(device),\n                                   torch.from_numpy(train_target).to(device).to(torch.int64))\n            val_set = UCRDataset(torch.from_numpy(val_dataset).to(device),\n                                 torch.from_numpy(val_target).to(device).to(torch.int64))\n            test_set = UCRDataset(torch.from_numpy(test_dataset).to(device),\n                                  torch.from_numpy(test_target).to(device).to(torch.int64))\n\n            train_loader = DataLoader(train_set, batch_size=args.batch_size, num_workers=0, drop_last=True)\n            val_loader = DataLoader(val_set, batch_size=args.batch_size, num_workers=0)\n            test_loader = DataLoader(test_set, batch_size=args.batch_size, num_workers=0)\n\n            train_loss = []\n            train_accuracy = []\n            num_steps = args.epoch // args.batch_size\n\n            last_loss = float('inf')\n            stop_count = 0\n            increase_count = 0\n\n            test_accuracy = 0\n            min_val_loss = float('inf')\n            end_val_epoch = 0\n\n            num_steps = train_set.__len__() // args.batch_size\n            for epoch in range(args.epoch):\n                # early stopping in finetune\n                if stop_count == 50 or increase_count == 50:\n                    print('model convergent at epoch {}, early stopping'.format(epoch))\n                    break\n\n                
epoch_train_loss = 0\n                epoch_train_acc = 0\n                model.train()\n                classifier.train()\n                for x, y in train_loader:\n                    optimizer.zero_grad()\n                    pred = model(x)\n                    pred = classifier(pred)\n\n                    step_loss = loss(pred, y)\n                    step_loss.backward()\n                    optimizer.step()\n\n                    epoch_train_loss += step_loss.item()\n                    epoch_train_acc += torch.sum(torch.argmax(pred.data, axis=1) == y) / len(y)\n\n                epoch_train_loss /= num_steps\n                epoch_train_acc /= num_steps\n\n                model.eval()\n                classifier.eval()\n                val_loss, val_accu = evaluate(val_loader, model, classifier, loss, device)\n                if min_val_loss > val_loss:\n                    min_val_loss = val_loss\n                    end_val_epoch = epoch\n                    test_loss, test_accuracy = evaluate(test_loader, model, classifier, loss, device)\n\n                if epoch % 100 == 0:\n                    print(\n                        \"epoch : {}, train loss: {} , train accuracy : {}, \\nval loss : {}, val accuracy : {}, \\ntest loss : {}, test accuracy : {}\".format(\n                            epoch, epoch_train_loss, epoch_train_acc, val_loss, val_accu, test_loss, test_accuracy))\n\n                if abs(last_loss - val_loss) <= 1e-4:\n                    stop_count += 1\n                else:\n                    stop_count = 0\n\n                if val_loss > last_loss:\n                    increase_count += 1\n                else:\n                    increase_count = 0\n\n                last_loss = val_loss\n            test_accuracies.append(test_accuracy)\n            end_val_epochs.append(end_val_epoch)\n            t = time.time() - t\n            train_time += t\n\n            print('{} fold finish training'.format(i))\n\n        
test_accuracies = torch.Tensor(test_accuracies)\n        end_val_epochs = np.array(end_val_epochs)\n        save_cls_result(args, test_accu=torch.mean(test_accuracies), test_std=torch.std(test_accuracies),\n                        train_time=train_time / 5, end_val_epoch=np.mean(end_val_epochs))\n        print('Done!')\n\n    if args.mode == 'directly_cls':\n        print('start finetune on {}'.format(args.dataset))\n\n        train_datasets, train_targets, val_datasets, val_targets, test_datasets, test_targets = get_all_datasets(\n            sum_dataset, sum_target)\n\n        losses = []\n        test_accuracies = []\n        train_time = 0.0\n        end_val_epochs = []\n        for i, train_dataset in enumerate(train_datasets):\n            t = time.time()\n            model.load_state_dict(model_init_state)\n            classifier.load_state_dict(classifier_init_state)\n            print('{} fold start training and evaluate'.format(i))\n\n            train_target = train_targets[i]\n            val_dataset = val_datasets[i]\n            val_target = val_targets[i]\n\n            test_dataset = test_datasets[i]\n            test_target = test_targets[i]\n\n            train_dataset, val_dataset, test_dataset = fill_nan_value(train_dataset, val_dataset, test_dataset)\n\n            if test_dataset.shape[0] < args.batch_size:\n                args.batch_size = args.batch_size // 2\n\n            if args.normalize_way == 'single':\n                # TODO normalize per series\n                if args.is_uea:\n                    train_dataset = normalize_uea_set(train_dataset)\n                    val_dataset = normalize_uea_set(val_dataset)\n                    test_dataset = normalize_uea_set(test_dataset)\n                else:\n                    train_dataset = normalize_per_series(train_dataset)\n                    val_dataset = normalize_per_series(val_dataset)\n                    test_dataset = normalize_per_series(test_dataset)\n            else:\n     
           train_dataset, val_dataset, test_dataset = normalize_train_val_test(train_dataset, val_dataset,\n                                                                                    test_dataset)\n\n            if args.is_uea:\n                train_set = UEADataset(torch.from_numpy(train_dataset).type(torch.FloatTensor).to(device),\n                                       torch.from_numpy(train_target).type(torch.FloatTensor).to(device).to(\n                                           torch.int64))\n                val_set = UEADataset(torch.from_numpy(val_dataset).type(torch.FloatTensor).to(device),\n                                     torch.from_numpy(val_target).type(torch.FloatTensor).to(device).to(torch.int64))\n                test_set = UEADataset(torch.from_numpy(test_dataset).type(torch.FloatTensor).to(device),\n                                      torch.from_numpy(test_target).type(torch.FloatTensor).to(device).to(torch.int64))\n            else:\n                train_set = UCRDataset(torch.from_numpy(train_dataset).to(device),\n                                       torch.from_numpy(train_target).to(device).to(torch.int64))\n                val_set = UCRDataset(torch.from_numpy(val_dataset).to(device),\n                                     torch.from_numpy(val_target).to(device).to(torch.int64))\n                test_set = UCRDataset(torch.from_numpy(test_dataset).to(device),\n                                      torch.from_numpy(test_target).to(device).to(torch.int64))\n\n            train_loader = DataLoader(train_set, batch_size=args.batch_size, num_workers=0, drop_last=True)\n            val_loader = DataLoader(val_set, batch_size=args.batch_size, num_workers=0)\n            test_loader = DataLoader(test_set, batch_size=args.batch_size, num_workers=0)\n\n            train_loss = []\n            train_accuracy = []\n            num_steps = args.epoch // args.batch_size\n\n            last_loss = float('inf')\n            stop_count = 0\n  
          increase_count = 0\n\n            test_accuracy = 0\n            min_val_loss = float('inf')\n            end_val_epoch = 0\n\n            num_steps = train_set.__len__() // args.batch_size\n            # print(\"test, args.batch_size = \", args.batch_size, \", num_steps = \", num_steps)\n            for epoch in range(args.epoch):\n                # early stopping in finetune\n                if stop_count == 50 or increase_count == 50:\n                    print('model convergent at epoch {}, early stopping'.format(epoch))\n                    break\n\n                epoch_train_loss = 0\n                epoch_train_acc = 0\n                model.train()\n                classifier.train()\n                for x, y in train_loader:\n                    optimizer.zero_grad()\n                    pred = model(x)\n                    pred = classifier(pred)\n\n                    step_loss = loss(pred, y)\n                    step_loss.backward()\n                    optimizer.step()\n\n                    epoch_train_loss += step_loss.item()\n                    epoch_train_acc += torch.sum(torch.argmax(pred.data, axis=1) == y) / len(y)\n\n                epoch_train_loss /= num_steps\n                epoch_train_acc /= num_steps\n\n                model.eval()\n                classifier.eval()\n                val_loss, val_accu = evaluate(val_loader, model, classifier, loss, device)\n                if min_val_loss > val_loss:\n                    min_val_loss = val_loss\n                    end_val_epoch = epoch\n                    test_loss, test_accuracy = evaluate(test_loader, model, classifier, loss, device)\n\n                if epoch % 100 == 0:\n                    print(\n                        \"epoch : {}, train loss: {} , train accuracy : {}, \\nval loss : {}, val accuracy : {}, \\ntest loss : {}, test accuracy : {}\".format(\n                            epoch, epoch_train_loss, epoch_train_acc, val_loss, val_accu, test_loss, 
test_accuracy))\n\n                if abs(last_loss - val_loss) <= 1e-4:\n                    stop_count += 1\n                else:\n                    stop_count = 0\n\n                if val_loss > last_loss:\n                    increase_count += 1\n                else:\n                    increase_count = 0\n\n                last_loss = val_loss\n            test_accuracies.append(test_accuracy)\n            end_val_epochs.append(end_val_epoch)\n            t = time.time() - t\n            train_time += t\n\n            print('{} fold finish training'.format(i))\n\n        test_accuracies = torch.Tensor(test_accuracies)\n        end_val_epochs = np.array(end_val_epochs)\n        save_cls_result(args, test_accu=torch.mean(test_accuracies), test_std=torch.std(test_accuracies),\n                        train_time=train_time / 5, end_val_epoch=np.mean(end_val_epochs), seeds=args.random_seed)\n        print('Done!')\n"
  },
  {
    "path": "ts_classification_methods/ts2vec_cls/__init__.py",
    "content": ""
  },
  {
    "path": "ts_classification_methods/ts2vec_cls/datautils.py",
    "content": "import os\nimport numpy as np\nimport pandas as pd\nimport math\nimport random\nfrom datetime import datetime\nimport pickle\nfrom utils import pkl_load, pad_nan_to_target\nfrom scipy.io.arff import loadarff\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\nfrom sklearn.model_selection import StratifiedKFold\n\n\ndef load_UCR(dataset):\n    dataroot = 'datasets/UCR'\n    dataroot = '/dev_data/lz/time_series_pretrain/datasets/UCRArchive_2018'\n    train_file = os.path.join(dataroot, dataset, dataset + \"_TRAIN.tsv\")\n    test_file = os.path.join(dataroot, dataset, dataset + \"_TEST.tsv\")\n    train_df = pd.read_csv(train_file, sep='\\t', header=None)\n    test_df = pd.read_csv(test_file, sep='\\t', header=None)\n    train_array = np.array(train_df)\n    test_array = np.array(test_df)\n\n    # Move the labels to {0, ..., L-1}\n    labels = np.unique(train_array[:, 0])\n    transform = {}\n    for i, l in enumerate(labels):\n        transform[l] = i\n\n    train = train_array[:, 1:].astype(np.float64)\n    train_labels = np.vectorize(transform.get)(train_array[:, 0])\n    test = test_array[:, 1:].astype(np.float64)\n    test_labels = np.vectorize(transform.get)(test_array[:, 0])\n\n    # Normalization for non-normalized datasets\n    # To keep the amplitude information, we do not normalize values over\n    # individual time series, but on the whole dataset\n    if dataset not in [\n        'AllGestureWiimoteX',\n        'AllGestureWiimoteY',\n        'AllGestureWiimoteZ',\n        'BME',\n        'Chinatown',\n        'Crop',\n        'EOGHorizontalSignal',\n        'EOGVerticalSignal',\n        'Fungi',\n        'GestureMidAirD1',\n        'GestureMidAirD2',\n        'GestureMidAirD3',\n        'GesturePebbleZ1',\n        'GesturePebbleZ2',\n        'GunPointAgeSpan',\n        'GunPointMaleVersusFemale',\n        'GunPointOldVersusYoung',\n        'HouseTwenty',\n        'InsectEPGRegularTrain',\n        'InsectEPGSmallTrain',\n       
 'MelbournePedestrian',\n        'PickupGestureWiimoteZ',\n        'PigAirwayPressure',\n        'PigArtPressure',\n        'PigCVP',\n        'PLAID',\n        'PowerCons',\n        'Rock',\n        'SemgHandGenderCh2',\n        'SemgHandMovementCh2',\n        'SemgHandSubjectCh2',\n        'ShakeGestureWiimoteZ',\n        'SmoothSubspace',\n        'UMD'\n    ]:\n        return train[..., np.newaxis], train_labels, test[..., np.newaxis], test_labels\n    \n    mean = np.nanmean(train)\n    std = np.nanstd(train)\n    train = (train - mean) / std\n    test = (test - mean) / std\n    return train[..., np.newaxis], train_labels, test[..., np.newaxis], test_labels\n\n\ndef load_UEA(dataset):\n    train_data = loadarff(f'datasets/UEA/{dataset}/{dataset}_TRAIN.arff')[0]\n    test_data = loadarff(f'datasets/UEA/{dataset}/{dataset}_TEST.arff')[0]\n    \n    def extract_data(data):\n        res_data = []\n        res_labels = []\n        for t_data, t_label in data:\n            t_data = np.array([ d.tolist() for d in t_data ])\n            t_label = t_label.decode(\"utf-8\")\n            res_data.append(t_data)\n            res_labels.append(t_label)\n        return np.array(res_data).swapaxes(1, 2), np.array(res_labels)\n    \n    train_X, train_y = extract_data(train_data)\n    test_X, test_y = extract_data(test_data)\n    \n    scaler = StandardScaler()\n    scaler.fit(train_X.reshape(-1, train_X.shape[-1]))\n    train_X = scaler.transform(train_X.reshape(-1, train_X.shape[-1])).reshape(train_X.shape)\n    test_X = scaler.transform(test_X.reshape(-1, test_X.shape[-1])).reshape(test_X.shape)\n    \n    labels = np.unique(train_y)\n    transform = { k : i for i, k in enumerate(labels)}\n    train_y = np.vectorize(transform.get)(train_y)\n    test_y = np.vectorize(transform.get)(test_y)\n    return train_X, train_y, test_X, test_y\n    \n    \ndef load_forecast_npy(name, univar=False):\n    data = np.load(f'datasets/{name}.npy')    \n    if univar:\n        data = data[: 
-1:]\n        \n    train_slice = slice(None, int(0.6 * len(data)))\n    valid_slice = slice(int(0.6 * len(data)), int(0.8 * len(data)))\n    test_slice = slice(int(0.8 * len(data)), None)\n    \n    scaler = StandardScaler().fit(data[train_slice])\n    data = scaler.transform(data)\n    data = np.expand_dims(data, 0)\n\n    pred_lens = [24, 48, 96, 288, 672]\n    return data, train_slice, valid_slice, test_slice, scaler, pred_lens, 0\n\n\ndef _get_time_features(dt):\n    return np.stack([\n        dt.minute.to_numpy(),\n        dt.hour.to_numpy(),\n        dt.dayofweek.to_numpy(),\n        dt.day.to_numpy(),\n        dt.dayofyear.to_numpy(),\n        dt.month.to_numpy(),\n        dt.weekofyear.to_numpy(),\n    ], axis=1).astype(np.float)\n\n\ndef load_forecast_csv(name, univar=False):\n    data = pd.read_csv(f'datasets/{name}.csv', index_col='date', parse_dates=True)\n    dt_embed = _get_time_features(data.index)\n    n_covariate_cols = dt_embed.shape[-1]\n    \n    if univar:\n        if name in ('ETTh1', 'ETTh2', 'ETTm1', 'ETTm2'):\n            data = data[['OT']]\n        elif name == 'electricity':\n            data = data[['MT_001']]\n        else:\n            data = data.iloc[:, -1:]\n        \n    data = data.to_numpy()\n    if name == 'ETTh1' or name == 'ETTh2':\n        train_slice = slice(None, 12*30*24)\n        valid_slice = slice(12*30*24, 16*30*24)\n        test_slice = slice(16*30*24, 20*30*24)\n    elif name == 'ETTm1' or name == 'ETTm2':\n        train_slice = slice(None, 12*30*24*4)\n        valid_slice = slice(12*30*24*4, 16*30*24*4)\n        test_slice = slice(16*30*24*4, 20*30*24*4)\n    else:\n        train_slice = slice(None, int(0.6 * len(data)))\n        valid_slice = slice(int(0.6 * len(data)), int(0.8 * len(data)))\n        test_slice = slice(int(0.8 * len(data)), None)\n    \n    scaler = StandardScaler().fit(data[train_slice])\n    data = scaler.transform(data)\n    if name in ('electricity'):\n        data = np.expand_dims(data.T, 
-1)  # Each variable is an instance rather than a feature\n    else:\n        data = np.expand_dims(data, 0)\n    \n    if n_covariate_cols > 0:\n        dt_scaler = StandardScaler().fit(dt_embed[train_slice])\n        dt_embed = np.expand_dims(dt_scaler.transform(dt_embed), 0)\n        data = np.concatenate([np.repeat(dt_embed, data.shape[0], axis=0), data], axis=-1)\n    \n    if name in ('ETTh1', 'ETTh2', 'electricity'):\n        pred_lens = [24, 48, 168, 336, 720]\n    else:\n        pred_lens = [24, 48, 96, 288, 672]\n        \n    return data, train_slice, valid_slice, test_slice, scaler, pred_lens, n_covariate_cols\n\n\ndef load_anomaly(name):\n    res = pkl_load(f'datasets/{name}.pkl')\n    return res['all_train_data'], res['all_train_labels'], res['all_train_timestamps'], \\\n           res['all_test_data'],  res['all_test_labels'],  res['all_test_timestamps'], \\\n           res['delay']\n\n\ndef gen_ano_train_data(all_train_data):\n    maxl = np.max([ len(all_train_data[k]) for k in all_train_data ])\n    pretrain_data = []\n    for k in all_train_data:\n        train_data = pad_nan_to_target(all_train_data[k], maxl, axis=0)\n        pretrain_data.append(train_data)\n    pretrain_data = np.expand_dims(np.stack(pretrain_data), 2)\n    return pretrain_data\n"
  },
  {
    "path": "ts_classification_methods/ts2vec_cls/models/__init__.py",
    "content": "from .encoder import TSEncoder\n"
  },
  {
    "path": "ts_classification_methods/ts2vec_cls/models/dilated_conv.py",
    "content": "import torch\nfrom torch import nn\nimport torch.nn.functional as F\nimport numpy as np\n\nclass SamePadConv(nn.Module):\n    def __init__(self, in_channels, out_channels, kernel_size, dilation=1, groups=1):\n        super().__init__()\n        self.receptive_field = (kernel_size - 1) * dilation + 1\n        padding = self.receptive_field // 2\n        self.conv = nn.Conv1d(\n            in_channels, out_channels, kernel_size,\n            padding=padding,\n            dilation=dilation,\n            groups=groups\n        )\n        self.remove = 1 if self.receptive_field % 2 == 0 else 0\n        \n    def forward(self, x):\n        out = self.conv(x)\n        if self.remove > 0:\n            out = out[:, :, : -self.remove]\n        return out\n    \nclass ConvBlock(nn.Module):\n    def __init__(self, in_channels, out_channels, kernel_size, dilation, final=False):\n        super().__init__()\n        self.conv1 = SamePadConv(in_channels, out_channels, kernel_size, dilation=dilation)\n        self.conv2 = SamePadConv(out_channels, out_channels, kernel_size, dilation=dilation)\n        self.projector = nn.Conv1d(in_channels, out_channels, 1) if in_channels != out_channels or final else None\n    \n    def forward(self, x):\n        residual = x if self.projector is None else self.projector(x)\n        x = F.gelu(x)\n        x = self.conv1(x)\n        x = F.gelu(x)\n        x = self.conv2(x)\n        return x + residual\n\nclass DilatedConvEncoder(nn.Module):\n    def __init__(self, in_channels, channels, kernel_size):\n        super().__init__()\n        self.net = nn.Sequential(*[\n            ConvBlock(\n                channels[i-1] if i > 0 else in_channels,\n                channels[i],\n                kernel_size=kernel_size,\n                dilation=2**i,\n                final=(i == len(channels)-1)\n            )\n            for i in range(len(channels))\n        ])\n        \n    def forward(self, x):\n        return self.net(x)\n"
  },
  {
    "path": "ts_classification_methods/ts2vec_cls/models/encoder.py",
    "content": "import torch\nfrom torch import nn\nimport torch.nn.functional as F\nimport numpy as np\nfrom .dilated_conv import DilatedConvEncoder\n\ndef generate_continuous_mask(B, T, n=5, l=0.1):\n    res = torch.full((B, T), True, dtype=torch.bool)\n    if isinstance(n, float):\n        n = int(n * T)\n    n = max(min(n, T // 2), 1)\n    \n    if isinstance(l, float):\n        l = int(l * T)\n    l = max(l, 1)\n    \n    for i in range(B):\n        for _ in range(n):\n            t = np.random.randint(T-l+1)\n            res[i, t:t+l] = False\n    return res\n\ndef generate_binomial_mask(B, T, p=0.5):\n    return torch.from_numpy(np.random.binomial(1, p, size=(B, T))).to(torch.bool)\n\nclass TSEncoder(nn.Module):\n    def __init__(self, input_dims, output_dims, hidden_dims=64, depth=10, mask_mode='binomial'):\n        super().__init__()\n        self.input_dims = input_dims\n        self.output_dims = output_dims\n        self.hidden_dims = hidden_dims\n        self.mask_mode = mask_mode\n        self.input_fc = nn.Linear(input_dims, hidden_dims)\n        self.feature_extractor = DilatedConvEncoder(\n            hidden_dims,\n            [hidden_dims] * depth + [output_dims],\n            kernel_size=3\n        )\n        self.repr_dropout = nn.Dropout(p=0.1)\n        \n    def forward(self, x, mask=None):  # x: B x T x input_dims\n        nan_mask = ~x.isnan().any(axis=-1)\n        x[~nan_mask] = 0\n        x = self.input_fc(x)  # B x T x Ch\n        \n        # generate & apply mask\n        if mask is None:\n            if self.training:\n                mask = self.mask_mode\n            else:\n                mask = 'all_true'\n        \n        if mask == 'binomial':\n            mask = generate_binomial_mask(x.size(0), x.size(1)).to(x.device)\n        elif mask == 'continuous':\n            mask = generate_continuous_mask(x.size(0), x.size(1)).to(x.device)\n        elif mask == 'all_true':\n            mask = x.new_full((x.size(0), x.size(1)), True, 
dtype=torch.bool)\n        elif mask == 'all_false':\n            mask = x.new_full((x.size(0), x.size(1)), False, dtype=torch.bool)\n        elif mask == 'mask_last':\n            mask = x.new_full((x.size(0), x.size(1)), True, dtype=torch.bool)\n            mask[:, -1] = False\n        \n        mask &= nan_mask\n        x[~mask] = 0\n        \n        # conv encoder\n        x = x.transpose(1, 2)  # B x Ch x T\n        x = self.repr_dropout(self.feature_extractor(x))  # B x Co x T\n        x = x.transpose(1, 2)  # B x T x Co\n        \n        return x\n        "
  },
  {
    "path": "ts_classification_methods/ts2vec_cls/models/losses.py",
    "content": "import torch\nfrom torch import nn\nimport torch.nn.functional as F\n\ndef hierarchical_contrastive_loss(z1, z2, alpha=0.5, temporal_unit=0):\n    loss = torch.tensor(0., device=z1.device)\n    d = 0\n    while z1.size(1) > 1:\n        if alpha != 0:\n            loss += alpha * instance_contrastive_loss(z1, z2)\n        if d >= temporal_unit:\n            if 1 - alpha != 0:\n                loss += (1 - alpha) * temporal_contrastive_loss(z1, z2)\n        d += 1\n        z1 = F.max_pool1d(z1.transpose(1, 2), kernel_size=2).transpose(1, 2)\n        z2 = F.max_pool1d(z2.transpose(1, 2), kernel_size=2).transpose(1, 2)\n    if z1.size(1) == 1:\n        if alpha != 0:\n            loss += alpha * instance_contrastive_loss(z1, z2)\n        d += 1\n    return loss / d\n\ndef instance_contrastive_loss(z1, z2):\n    B, T = z1.size(0), z1.size(1)\n    if B == 1:\n        return z1.new_tensor(0.)\n    z = torch.cat([z1, z2], dim=0)  # 2B x T x C\n    z = z.transpose(0, 1)  # T x 2B x C\n    sim = torch.matmul(z, z.transpose(1, 2))  # T x 2B x 2B\n    logits = torch.tril(sim, diagonal=-1)[:, :, :-1]    # T x 2B x (2B-1)\n    logits += torch.triu(sim, diagonal=1)[:, :, 1:]\n    logits = -F.log_softmax(logits, dim=-1)\n    \n    i = torch.arange(B, device=z1.device)\n    loss = (logits[:, i, B + i - 1].mean() + logits[:, B + i, i].mean()) / 2\n    return loss\n\ndef temporal_contrastive_loss(z1, z2):\n    B, T = z1.size(0), z1.size(1)\n    if T == 1:\n        return z1.new_tensor(0.)\n    z = torch.cat([z1, z2], dim=1)  # B x 2T x C\n    sim = torch.matmul(z, z.transpose(1, 2))  # B x 2T x 2T\n    logits = torch.tril(sim, diagonal=-1)[:, :, :-1]    # B x 2T x (2T-1)\n    logits += torch.triu(sim, diagonal=1)[:, :, 1:]\n    logits = -F.log_softmax(logits, dim=-1)\n    \n    t = torch.arange(T, device=z1.device)\n    loss = (logits[:, t, T + t - 1].mean() + logits[:, T + t, t].mean()) / 2\n    return loss\n"
  },
  {
    "path": "ts_classification_methods/ts2vec_cls/result/ts2vec_tsm_train_val_b8_single_norm_0409_cls_result.csv",
    "content": "id,dataset_name,test_accuracy,test_std,train_time,end_val_epoch,seeds\n0,ACSF1,0.88,0.0371,28.4825,0.0,42\n1,Adiac,0.8066,0.0652,5.3666,0.0,42\n2,AllGestureWiimoteX,0.782,0.0164,16.295,0.0,42\n3,AllGestureWiimoteY,0.84,0.015,16.5032,0.0,42\n4,AllGestureWiimoteZ,0.773,0.0527,16.4974,0.0,42\n5,ArrowHead,0.8908,0.0402,5.1086,0.0,42\n6,BME,0.9944,0.0124,4.5481,0.0,42\n7,Beef,0.6833,0.1807,5.6727,0.0,42\n8,BeetleFly,0.925,0.1118,5.8705,0.0,42\n9,BirdChicken,0.975,0.0559,5.8268,0.0,42\n10,CBF,1.0,0.0,5.0538,0.0,42\n11,Car,0.8833,0.0349,6.2595,0.0,42\n12,Chinatown,0.9726,0.0217,3.9859,0.0,42\n13,ChlorineConcentration,0.9998,0.0005,34.2372,0.0,42\n14,CinCECGTorso,0.9972,0.0029,32.5504,0.0,42\n15,Coffee,1.0,0.0,4.9984,0.0,42\n16,Computers,0.6940000000000001,0.0422,18.2591,0.0,42\n17,CricketX,0.8321,0.0394,13.9189,0.0,42\n18,CricketY,0.8256,0.0461,13.9781,0.0,42\n19,CricketZ,0.8487,0.0242,13.9948,0.0,42\n20,Crop,0.7448,0.009000000000000001,154.3218,0.0,42\n21,DiatomSizeReduction,0.9969,0.0069,5.4306,0.0,42\n22,DistalPhalanxOutlineAgeGroup,0.8125,0.0295,4.6066,0.0,42\n23,DistalPhalanxOutlineCorrect,0.8185,0.0388,5.0256,0.0,42\n24,DistalPhalanxTW,0.7792,0.0384,4.5488,0.0,42\n25,DodgerLoopDay,0.6391,0.0575,5.1768,0.0,42\n26,DodgerLoopGame,0.9563,0.0523,5.1137,0.0,42\n27,DodgerLoopWeekend,0.9812,0.027999999999999997,5.0872,0.0,42\n28,ECG200,0.88,0.0112,4.4465,0.0,42\n29,ECG5000,0.9528,0.0059,24.8707,0.0,42\n30,ECGFiveDays,1.0,0.0,4.9662,0.0,42\n31,EOGHorizontalSignal,0.7279,0.0204,25.4733,0.0,42\n32,EOGVerticalSignal,0.6519,0.0083,25.5223,0.0,42\n33,Earthquakes,0.7983,0.0056,15.8322,0.0,42\n34,ElectricDevices,0.8848,0.0042,915.319,0.0,42\n35,EthanolLevel,0.5897,0.032,35.0616,0.0,42\n36,FaceAll,0.9871,0.004,15.0294,0.0,42\n37,FaceFour,0.9640000000000001,0.0379,5.506,0.0,42\n38,FacesUCR,0.9902,0.004,15.3127,0.0,42\n39,FiftyWords,0.8022,0.0306,14.235999999999999,0.0,42\n40,Fish,0.9343,0.0329,5.8832,0.0,42\n41,FordA,0.9303,0.0044,39.9856,0.0,42\n42,FordB,0.9132,0.0134
,35.6743,0.0,42\n43,FreezerRegularTrain,0.9977,0.0009,18.0955,0.0,42\n44,FreezerSmallTrain,0.9969,0.0036,17.025,0.0,42\n45,Fungi,1.0,0.0,4.8975,0.0,42\n46,GestureMidAirD1,0.6479,0.0531,5.5628,0.0,42\n47,GestureMidAirD2,0.527,0.1203,5.5954,0.0,42\n48,GestureMidAirD3,0.3315,0.0435,5.5434,0.0,42\n49,GesturePebbleZ1,0.9507,0.0327,5.9473,0.0,42\n50,GesturePebbleZ2,0.9473,0.0215,5.8398,0.0,42\n51,GunPoint,0.995,0.0112,4.7644,0.0,42\n52,GunPointAgeSpan,0.9889,0.0079,4.8011,0.0,42\n53,GunPointMaleVersusFemale,0.9956,0.0061,4.7437,0.0,42\n54,GunPointOldVersusYoung,0.9956,0.0061,4.8197,0.0,42\n55,Ham,0.8741,0.0346,5.7141,0.0,42\n56,HandOutlines,0.9168,0.0142,57.6561,0.0,42\n57,Haptics,0.5594,0.0468,22.9492,0.0,42\n58,Herring,0.6806,0.0757,5.9701,0.0,42\n59,HouseTwenty,0.9496,0.0358,38.364000000000004,0.0,42\n60,InlineSkate,0.6415,0.0311,36.4749,0.0,42\n61,InsectEPGRegularTrain,0.9807,0.006999999999999999,16.8548,0.0,42\n62,InsectEPGSmallTrain,0.9699,0.0254,6.4435,0.0,42\n63,InsectWingbeatSound,0.7077,0.0057,16.7995,0.0,42\n64,ItalyPowerDemand,0.9745,0.0083,4.5284,0.0,42\n65,LargeKitchenAppliances,0.9147,0.0119,18.617,0.0,42\n66,Lightning2,0.901,0.0365,6.4622,0.0,42\n67,Lightning7,0.8389,0.06,5.2601,0.0,42\n68,Mallat,0.9962,0.0023,26.1331,0.0,42\n69,Meat,1.0,0.0,6.2323,0.0,42\n70,MedicalImages,0.8431,0.0394,6.0271,0.0,42\n71,MelbournePedestrian,0.8986,0.0059,20.6329,0.0,42\n72,MiddlePhalanxOutlineAgeGroup,0.7347,0.0647,4.865,0.0,42\n73,MiddlePhalanxOutlineCorrect,0.8507,0.0192,5.2434,0.0,42\n74,MiddlePhalanxTW,0.6455,0.0219,5.0957,0.0,42\n75,MixedShapesRegularTrain,0.945,0.006999999999999999,26.7892,0.0,42\n76,MixedShapesSmallTrain,0.941,0.0081,25.0754,0.0,42\n77,MoteStrain,0.9717,0.0032,5.2492,0.0,42\n78,NonInvasiveFetalECGThorax1,0.9392,0.011,28.4395,0.0,42\n79,NonInvasiveFetalECGThorax2,0.9490000000000001,0.0088,28.016,0.0,42\n80,OSULeaf,0.8937,0.0292,14.9081,0.0,42\n81,OliveOil,0.85,0.0697,6.1428,0.0,42\n82,PLAID,0.5503,0.0361,110.9831,0.0,42\n83,PhalangesOutlinesCorrect,0
.8439,0.0156,17.1865,0.0,42\n84,Phoneme,0.4204,0.0122,26.7327,0.0,42\n85,PickupGestureWiimoteZ,0.84,0.0548,5.4558,0.0,42\n86,PigAirwayPressure,0.4038,0.0475,38.5497,0.0,42\n87,PigArtPressure,0.9424,0.0307,38.7209,0.0,42\n88,PigCVP,0.8753,0.0514,38.5487,0.0,42\n89,Plane,0.9905,0.0213,5.0078,0.0,42\n90,PowerCons,0.9861,0.0098,4.8548,0.0,42\n91,ProximalPhalanxOutlineAgeGroup,0.8545,0.0344,4.6571,0.0,42\n92,ProximalPhalanxOutlineCorrect,0.8833,0.0257,4.9042,0.0,42\n93,ProximalPhalanxTW,0.8281,0.0179,4.646,0.0,42\n94,RefrigerationDevices,0.7627,0.0234,18.778,0.0,42\n95,Rock,0.8286,0.0814,58.8638,0.0,42\n96,ScreenType,0.552,0.0338,18.7799,0.0,42\n97,SemgHandGenderCh2,0.9544,0.0234,29.3616,0.0,42\n98,SemgHandMovementCh2,0.7767,0.0416,29.8932,0.0,42\n99,SemgHandSubjectCh2,0.9344,0.0169,29.4465,0.0,42\n100,ShakeGestureWiimoteZ,0.93,0.0447,5.6136,0.0,42\n101,ShapeletSim,0.985,0.0137,5.9648,0.0,42\n102,ShapesAll,0.9133,0.0104,17.5754,0.0,42\n103,SmallKitchenAppliances,0.7347,0.0674,18.6373,0.0,42\n104,SmoothSubspace,0.9633,0.0361,3.8639,0.0,42\n105,SonyAIBORobotSurface1,0.9952,0.0044,4.5268,0.0,42\n106,SonyAIBORobotSurface2,0.9949,0.0051,4.6777,0.0,42\n107,StarLightCurves,0.9801,0.0042,77.98,0.0,42\n108,Strawberry,0.9685,0.0132,29.3446,0.0,42\n109,SwedishLeaf,0.9502,0.0262,5.5732,0.0,42\n110,Symbols,0.9863,0.0122,15.0173,0.0,42\n111,SyntheticControl,0.9983,0.0037,4.5299,0.0,42\n112,ToeSegmentation1,0.9588,0.041,5.1871,0.0,42\n113,ToeSegmentation2,0.9517,0.0273,5.4543,0.0,42\n114,Trace,1.0,0.0,5.1909,0.0,42\n115,TwoLeadECG,0.9991,0.0019,5.1407,0.0,42\n116,TwoPatterns,1.0,0.0,31.0201,0.0,42\n117,UMD,0.9944,0.0124,4.8644,0.0,42\n118,UWaveGestureLibraryAll,0.9652,0.0106,34.1178,0.0,42\n119,UWaveGestureLibraryX,0.8513,0.0161,26.5674,0.0,42\n120,UWaveGestureLibraryY,0.7874,0.0046,29.9703,0.0,42\n121,UWaveGestureLibraryZ,0.8024,0.0141,27.9695,0.0,42\n122,Wafer,0.999,0.0006,23.6,0.0,42\n123,Wine,0.9648,0.0568,5.0664,0.0,42\n124,WordSynonyms,0.7989,0.0186,13.976,0.0,42\n125,Worms,0.721
,0.0309,20.011,0.0,42\n126,WormsTwoClass,0.7716,0.0727,20.1901,0.0,42\n127,Yoga,0.9709,0.0058,26.4065,0.00,42\n"
  },
  {
    "path": "ts_classification_methods/ts2vec_cls/scripts/generator_ts2vec.py",
    "content": "ucr_dataset = ['ACSF1', 'Adiac', 'AllGestureWiimoteX', 'AllGestureWiimoteY', 'AllGestureWiimoteZ', 'ArrowHead', 'BME',\n               'Beef',\n               'BeetleFly', 'BirdChicken', 'CBF', 'Car', 'Chinatown', 'ChlorineConcentration', 'CinCECGTorso', 'Coffee',\n               'Computers',\n               'CricketX', 'CricketY', 'CricketZ', 'Crop', 'DiatomSizeReduction', 'DistalPhalanxOutlineAgeGroup',\n               'DistalPhalanxOutlineCorrect', 'DistalPhalanxTW', 'DodgerLoopDay', 'DodgerLoopGame', 'DodgerLoopWeekend',\n               'ECG200', 'ECG5000', 'ECGFiveDays', 'EOGHorizontalSignal', 'EOGVerticalSignal', 'Earthquakes',\n               'ElectricDevices',\n               'EthanolLevel', 'FaceAll', 'FaceFour', 'FacesUCR', 'FiftyWords', 'Fish', 'FordA', 'FordB',\n               'FreezerRegularTrain',\n               'FreezerSmallTrain', 'Fungi', 'GestureMidAirD1', 'GestureMidAirD2', 'GestureMidAirD3', 'GesturePebbleZ1',\n               'GesturePebbleZ2', 'GunPoint', 'GunPointAgeSpan', 'GunPointMaleVersusFemale', 'GunPointOldVersusYoung',\n               'Ham',\n               'HandOutlines', 'Haptics', 'Herring', 'HouseTwenty', 'InlineSkate', 'InsectEPGRegularTrain',\n               'InsectEPGSmallTrain',\n               'InsectWingbeatSound', 'ItalyPowerDemand', 'LargeKitchenAppliances', 'Lightning2', 'Lightning7',\n               'Mallat', 'Meat',\n               'MedicalImages', 'MelbournePedestrian', 'MiddlePhalanxOutlineAgeGroup', 'MiddlePhalanxOutlineCorrect',\n               'MiddlePhalanxTW', 'MixedShapesRegularTrain', 'MixedShapesSmallTrain', 'MoteStrain',\n               'NonInvasiveFetalECGThorax1',\n               'NonInvasiveFetalECGThorax2', 'OSULeaf', 'OliveOil', 'PLAID', 'PhalangesOutlinesCorrect', 'Phoneme',\n               'PickupGestureWiimoteZ', 'PigAirwayPressure', 'PigArtPressure', 'PigCVP', 'Plane', 'PowerCons',\n               'ProximalPhalanxOutlineAgeGroup', 'ProximalPhalanxOutlineCorrect', 'ProximalPhalanxTW',\n 
              'RefrigerationDevices',\n               'Rock', 'ScreenType', 'SemgHandGenderCh2', 'SemgHandMovementCh2', 'SemgHandSubjectCh2',\n               'ShakeGestureWiimoteZ',\n               'ShapeletSim', 'ShapesAll', 'SmallKitchenAppliances', 'SmoothSubspace', 'SonyAIBORobotSurface1',\n               'SonyAIBORobotSurface2', 'StarLightCurves', 'Strawberry', 'SwedishLeaf', 'Symbols', 'SyntheticControl',\n               'ToeSegmentation1', 'ToeSegmentation2', 'Trace', 'TwoLeadECG', 'TwoPatterns', 'UMD',\n               'UWaveGestureLibraryAll',\n               'UWaveGestureLibraryX', 'UWaveGestureLibraryY', 'UWaveGestureLibraryZ', 'Wafer', 'Wine', 'WordSynonyms',\n               'Worms',\n               'WormsTwoClass', 'Yoga']\n\ni = 0\nfor dataset in ucr_dataset:\n    print(\"i = \", i, \"dataset_name = \", dataset)\n    i = i + 1\n    # '''\n    # python train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Coffee --fcn_epoch 1000 --gpu 1 --batch-size 8 --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1\n    # '''\n    # with open('/SSD/lz/time_tsm/ts2vec_cls/scripts/ts2vec_fcn_set_norm.sh', 'a') as f:\n    #     f.write('python train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 '\n    #             '--dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set '\n    #             '--dataset ' + dataset\n    #             + ' --fcn_epoch 1000 --gpu 1 --batch-size 8 ' +\n    #             ' --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1' + ';\\n')\n    #\n    # '''\n    # python train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single  --dataset Coffee --fcn_epoch 1000 --gpu 1 --batch-size 8 --loss cross_entropy --save_csv_name ts2vec_fcn_single_norm_0404_ --cuda cuda:1\n    # '''\n    # with 
open('/SSD/lz/time_tsm/ts2vec_cls/scripts/ts2vec_fcn_single_norm.sh', 'a') as f:\n    #     f.write('python train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 '\n    #             '--dataroot /SSD/lz/UCRArchive_2018 --normalize_way single '\n    #             '--dataset ' + dataset\n    #             + ' --fcn_epoch 1000 --gpu 1 --batch-size 8 ' +\n    #             ' --save_csv_name ts2vec_fcn_single_norm_0404_ --cuda cuda:1' + ';\\n')\n\n\n    # '''\n    # python train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Coffee --gpu 1 --batch-size 8 --save_csv_name ts2vec_tsm_set_norm_0404_\n    # '''\n    # with open('/SSD/lz/time_tsm/ts2vec_cls/scripts/ts2vec_tsm_set_norm.sh', 'a') as f:\n    #      f.write('python train_tsm.py '\n    #             '--dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set '\n    #             '--dataset ' + dataset\n    #             + ' --gpu 1 --batch-size 8 ' +\n    #             ' --save_csv_name ts2vec_tsm_set_norm_0404_' + ';\\n')\n    #\n    '''\n       python train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Coffee --gpu 1 --batch-size 8 --save_csv_name ts2vec_tsm_single_norm_0404_\n    '''\n    with open('/SSD/lz/time_tsm/ts2vec_cls/scripts/ts2vec_tsm_single_norm.sh', 'a') as f:\n        f.write('python train_tsm.py '\n                '--dataroot /SSD/lz/UCRArchive_2018 --normalize_way single '\n                '--dataset ' + dataset\n                + ' --gpu 1 --batch-size 8 ' +\n                ' --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_' + ';\\n')\n\n\ni = 0\nfor dataset in ucr_dataset:\n    print(\"i = \", i, \"dataset_name = \", dataset)\n    i = i + 1\n    '''\n          python train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Coffee --gpu 1 --batch-size 16 --save_csv_name ts2vec_tsm_single_norm_0404_\n       '''\n    with 
open('/SSD/lz/time_tsm/ts2vec_cls/scripts/ts2vec_tsm_single_norm.sh', 'a') as f:\n        f.write('python train_tsm.py '\n                '--dataroot /SSD/lz/UCRArchive_2018 --normalize_way single '\n                '--dataset ' + dataset\n                + ' --gpu 1 --batch-size 16 ' +\n                ' --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_' + ';\\n')\n\n\n## nohup ./scripts/ts2vec_fcn_set_norm.sh &\n## nohup ./scripts/ts2vec_fcn_single_norm.sh &\n\n## nohup ./scripts/ts2vec_tsm_set_norm.sh &\n## nohup ./scripts/ts2vec_tsm_single_norm.sh &"
  },
  {
    "path": "ts_classification_methods/ts2vec_cls/scripts/generator_ts2vec_uea.py",
    "content": "uea_all = ['ArticularyWordRecognition', 'AtrialFibrillation', 'BasicMotions', 'CharacterTrajectories',\n           'Cricket', 'DuckDuckGeese', 'EigenWorms', 'Epilepsy', 'EthanolConcentration', 'ERing',\n           'FaceDetection', 'FingerMovements', 'HandMovementDirection', 'Handwriting',\n           'Heartbeat', 'InsectWingbeat', 'JapaneseVowels', 'Libras', 'LSST', 'MotorImagery',\n           'NATOPS', 'PenDigits', 'PEMS-SF', 'PhonemeSpectra', 'RacketSports', 'SelfRegulationSCP1',\n           'SelfRegulationSCP2', 'SpokenArabicDigits', 'StandWalkJump', 'UWaveGestureLibrary']\n\ni = 0\nfor dataset in uea_all:\n    print(\"i = \", i, \"dataset_name = \", dataset)\n    i = i + 1\n    '''\n          python train_tsm_uea.py --dataroot /SSD/lz/Multivariate2018_arff --dataset BasicMotions --gpu 1 --batch-size 8 --save_csv_name ts2vec_tsm_uea_0423_\n       '''\n    with open('/SSD/lz/time_tsm/ts2vec_cls/scripts/ts2vec_tsm_uea.sh', 'a') as f:\n        f.write('python train_tsm_uea.py '\n                '--dataroot /SSD/lz/Multivariate2018_arff '\n                '--dataset ' + dataset\n                + ' --gpu 1 --batch-size 8 ' +\n                ' --save_csv_name ts2vec_tsm_uea_0423_' + ';\\n')\n\n## nohup ./scripts/ts2vec_tsm_uea.sh &\n"
  },
  {
    "path": "ts_classification_methods/ts2vec_cls/scripts/ts2vec_fcn_set_norm.sh",
    "content": "python train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ACSF1 --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Adiac --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset AllGestureWiimoteX --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset AllGestureWiimoteY --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset AllGestureWiimoteZ --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ArrowHead --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset BME --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda 
cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Beef --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset BeetleFly --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset BirdChicken --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset CBF --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Car --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Chinatown --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ChlorineConcentration --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py 
--backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset CinCECGTorso --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Coffee --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Computers --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset CricketX --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset CricketY --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset CricketZ --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Crop --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier 
nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset DiatomSizeReduction --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset DistalPhalanxOutlineAgeGroup --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset DistalPhalanxOutlineCorrect --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset DistalPhalanxTW --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset DodgerLoopDay --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset DodgerLoopGame --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset DodgerLoopWeekend --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda 
cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ECG200 --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ECG5000 --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ECGFiveDays --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset EOGHorizontalSignal --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset EOGVerticalSignal --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Earthquakes --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ElectricDevices --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda 
cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset EthanolLevel --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset FaceAll --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset FaceFour --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset FacesUCR --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset FiftyWords --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Fish --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset FordA --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py 
--backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset FordB --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset FreezerRegularTrain --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset FreezerSmallTrain --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Fungi --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset GestureMidAirD1 --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset GestureMidAirD2 --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset GestureMidAirD3 --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython 
train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset GesturePebbleZ1 --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset GesturePebbleZ2 --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset GunPoint --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset GunPointAgeSpan --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset GunPointMaleVersusFemale --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset GunPointOldVersusYoung --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Ham --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda 
cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset HandOutlines --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Haptics --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Herring --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset HouseTwenty --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset InlineSkate --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset InsectEPGRegularTrain --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset InsectEPGSmallTrain --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda 
cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset InsectWingbeatSound --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ItalyPowerDemand --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset LargeKitchenAppliances --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Lightning2 --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Lightning7 --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Mallat --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Meat --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda 
cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset MedicalImages --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset MelbournePedestrian --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset MiddlePhalanxOutlineAgeGroup --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset MiddlePhalanxOutlineCorrect --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset MiddlePhalanxTW --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset MixedShapesRegularTrain --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset MixedShapesSmallTrain --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss 
cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset MoteStrain --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset NonInvasiveFetalECGThorax1 --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset NonInvasiveFetalECGThorax2 --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset OSULeaf --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset OliveOil --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset PLAID --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset PhalangesOutlinesCorrect --fcn_epoch 1000 --gpu 1 
--batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Phoneme --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset PickupGestureWiimoteZ --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset PigAirwayPressure --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset PigArtPressure --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset PigCVP --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Plane --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset PowerCons --fcn_epoch 1000 --gpu 1 
--batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ProximalPhalanxOutlineAgeGroup --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ProximalPhalanxOutlineCorrect --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ProximalPhalanxTW --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset RefrigerationDevices --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Rock --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ScreenType --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset 
SemgHandGenderCh2 --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset SemgHandMovementCh2 --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset SemgHandSubjectCh2 --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ShakeGestureWiimoteZ --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ShapeletSim --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ShapesAll --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset SmallKitchenAppliances --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 
--normalize_way train_set --dataset SmoothSubspace --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset SonyAIBORobotSurface1 --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset SonyAIBORobotSurface2 --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset StarLightCurves --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Strawberry --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset SwedishLeaf --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Symbols --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot 
/SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset SyntheticControl --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ToeSegmentation1 --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ToeSegmentation2 --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Trace --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset TwoLeadECG --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset TwoPatterns --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset UMD --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot 
/SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset UWaveGestureLibraryAll --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset UWaveGestureLibraryX --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset UWaveGestureLibraryY --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset UWaveGestureLibraryZ --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Wafer --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Wine --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset WordSynonyms --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 
128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Worms --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset WormsTwoClass --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\npython train_fcn.py --backbone fcn --classifier nonlinear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Yoga --fcn_epoch 1000 --gpu 1 --batch-size 8  --loss cross_entropy --save_csv_name ts2vec_fcn_set_norm_0404_ --cuda cuda:1;\n"
  },
  {
    "path": "ts_classification_methods/ts2vec_cls/scripts/ts2vec_fcn_single_norm.sh",
    "content": ""
  },
  {
    "path": "ts_classification_methods/ts2vec_cls/scripts/ts2vec_tsm_set_norm.sh",
    "content": "python train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ACSF1 --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Adiac --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset AllGestureWiimoteX --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset AllGestureWiimoteY --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset AllGestureWiimoteZ --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ArrowHead --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset BME --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Beef --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset BeetleFly --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset BirdChicken --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset CBF --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Car --gpu 1 --batch-size 8  
--save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Chinatown --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ChlorineConcentration --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset CinCECGTorso --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Coffee --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Computers --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset CricketX --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset CricketY --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset CricketZ --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Crop --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset DiatomSizeReduction --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset DistalPhalanxOutlineAgeGroup --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 
--normalize_way train_set --dataset DistalPhalanxOutlineCorrect --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset DistalPhalanxTW --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset DodgerLoopDay --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset DodgerLoopGame --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset DodgerLoopWeekend --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ECG200 --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ECG5000 --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ECGFiveDays --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset EOGHorizontalSignal --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset EOGVerticalSignal --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Earthquakes --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ElectricDevices --gpu 1 --batch-size 8  
--save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset EthanolLevel --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset FaceAll --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset FaceFour --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset FacesUCR --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset FiftyWords --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Fish --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset FordA --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset FordB --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset FreezerRegularTrain --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset FreezerSmallTrain --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Fungi --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset 
GestureMidAirD1 --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset GestureMidAirD2 --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset GestureMidAirD3 --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset GesturePebbleZ1 --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset GesturePebbleZ2 --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset GunPoint --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset GunPointAgeSpan --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset GunPointMaleVersusFemale --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset GunPointOldVersusYoung --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Ham --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset HandOutlines --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Haptics --gpu 1 --batch-size 8  --save_csv_name 
ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Herring --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset HouseTwenty --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset InlineSkate --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset InsectEPGRegularTrain --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset InsectEPGSmallTrain --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset InsectWingbeatSound --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ItalyPowerDemand --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset LargeKitchenAppliances --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Lightning2 --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Lightning7 --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Mallat --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 
--normalize_way train_set --dataset Meat --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset MedicalImages --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset MelbournePedestrian --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset MiddlePhalanxOutlineAgeGroup --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset MiddlePhalanxOutlineCorrect --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset MiddlePhalanxTW --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset MixedShapesRegularTrain --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset MixedShapesSmallTrain --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset MoteStrain --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset NonInvasiveFetalECGThorax1 --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset NonInvasiveFetalECGThorax2 --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set 
--dataset OSULeaf --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset OliveOil --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset PLAID --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset PhalangesOutlinesCorrect --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Phoneme --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset PickupGestureWiimoteZ --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset PigAirwayPressure --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset PigArtPressure --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset PigCVP --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Plane --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset PowerCons --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ProximalPhalanxOutlineAgeGroup --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython 
train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ProximalPhalanxOutlineCorrect --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ProximalPhalanxTW --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset RefrigerationDevices --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Rock --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ScreenType --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset SemgHandGenderCh2 --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset SemgHandMovementCh2 --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset SemgHandSubjectCh2 --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ShakeGestureWiimoteZ --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ShapeletSim --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ShapesAll --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way 
train_set --dataset SmallKitchenAppliances --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset SmoothSubspace --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset SonyAIBORobotSurface1 --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset SonyAIBORobotSurface2 --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset StarLightCurves --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Strawberry --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset SwedishLeaf --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Symbols --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset SyntheticControl --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ToeSegmentation1 --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset ToeSegmentation2 --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Trace --gpu 1 --batch-size 8  --save_csv_name 
ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset TwoLeadECG --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset TwoPatterns --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset UMD --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset UWaveGestureLibraryAll --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset UWaveGestureLibraryX --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset UWaveGestureLibraryY --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset UWaveGestureLibraryZ --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Wafer --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Wine --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset WordSynonyms --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Worms --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way 
train_set --dataset WormsTwoClass --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way train_set --dataset Yoga --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_set_norm_0404_;\n"
  },
  {
    "path": "ts_classification_methods/ts2vec_cls/scripts/ts2vec_tsm_single_norm.sh",
    "content": "python train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ACSF1 --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Adiac --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset AllGestureWiimoteX --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset AllGestureWiimoteY --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset AllGestureWiimoteZ --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ArrowHead --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset BME --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Beef --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset BeetleFly --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset BirdChicken --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset CBF --gpu 1 --batch-size 8  --save_csv_name 
ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Car --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Chinatown --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ChlorineConcentration --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset CinCECGTorso --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Coffee --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Computers --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset CricketX --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset CricketY --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset CricketZ --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Crop --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset DiatomSizeReduction --gpu 1 --batch-size 8  
--save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset DistalPhalanxOutlineAgeGroup --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset DistalPhalanxOutlineCorrect --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset DistalPhalanxTW --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset DodgerLoopDay --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset DodgerLoopGame --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset DodgerLoopWeekend --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ECG200 --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ECG5000 --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ECGFiveDays --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset EOGHorizontalSignal --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot 
/SSD/lz/UCRArchive_2018 --normalize_way single --dataset EOGVerticalSignal --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Earthquakes --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ElectricDevices --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset EthanolLevel --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FaceAll --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FaceFour --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FacesUCR --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FiftyWords --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Fish --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FordA --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FordB --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py 
--dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FreezerRegularTrain --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FreezerSmallTrain --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Fungi --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GestureMidAirD1 --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GestureMidAirD2 --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GestureMidAirD3 --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GesturePebbleZ1 --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GesturePebbleZ2 --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GunPoint --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GunPointAgeSpan --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GunPointMaleVersusFemale --gpu 1 --batch-size 8  
--save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GunPointOldVersusYoung --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Ham --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset HandOutlines --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Haptics --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Herring --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset HouseTwenty --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset InlineSkate --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset InsectEPGRegularTrain --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset InsectEPGSmallTrain --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset InsectWingbeatSound --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single 
--dataset ItalyPowerDemand --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset LargeKitchenAppliances --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Lightning2 --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Lightning7 --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Mallat --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Meat --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MedicalImages --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MelbournePedestrian --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MiddlePhalanxOutlineAgeGroup --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MiddlePhalanxOutlineCorrect --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MiddlePhalanxTW --gpu 1 --batch-size 8  --save_csv_name 
ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MixedShapesRegularTrain --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MixedShapesSmallTrain --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MoteStrain --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset NonInvasiveFetalECGThorax1 --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset NonInvasiveFetalECGThorax2 --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset OSULeaf --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset OliveOil --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset PLAID --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset PhalangesOutlinesCorrect --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Phoneme --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 
--normalize_way single --dataset PickupGestureWiimoteZ --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset PigAirwayPressure --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset PigArtPressure --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset PigCVP --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Plane --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset PowerCons --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ProximalPhalanxOutlineAgeGroup --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ProximalPhalanxOutlineCorrect --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ProximalPhalanxTW --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset RefrigerationDevices --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Rock --gpu 1 --batch-size 8  --save_csv_name 
ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ScreenType --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SemgHandGenderCh2 --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SemgHandMovementCh2 --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SemgHandSubjectCh2 --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ShakeGestureWiimoteZ --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ShapeletSim --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ShapesAll --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SmallKitchenAppliances --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SmoothSubspace --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SonyAIBORobotSurface1 --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 
--normalize_way single --dataset SonyAIBORobotSurface2 --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset StarLightCurves --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Strawberry --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SwedishLeaf --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Symbols --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SyntheticControl --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ToeSegmentation1 --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ToeSegmentation2 --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Trace --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset TwoLeadECG --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset TwoPatterns --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython 
train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset UMD --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset UWaveGestureLibraryAll --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset UWaveGestureLibraryX --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset UWaveGestureLibraryY --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset UWaveGestureLibraryZ --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Wafer --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Wine --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset WordSynonyms --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Worms --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset WormsTwoClass --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Yoga --gpu 1 --batch-size 8  --save_csv_name 
ts2vec_tsm_train_val_b8_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ACSF1 --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Adiac --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset AllGestureWiimoteX --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset AllGestureWiimoteY --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset AllGestureWiimoteZ --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ArrowHead --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset BME --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Beef --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset BeetleFly --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset BirdChicken --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset CBF --gpu 1 
--batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Car --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Chinatown --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ChlorineConcentration --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset CinCECGTorso --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Coffee --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Computers --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset CricketX --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset CricketY --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset CricketZ --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Crop --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single 
--dataset DiatomSizeReduction --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset DistalPhalanxOutlineAgeGroup --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset DistalPhalanxOutlineCorrect --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset DistalPhalanxTW --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset DodgerLoopDay --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset DodgerLoopGame --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset DodgerLoopWeekend --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ECG200 --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ECG5000 --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ECGFiveDays --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset EOGHorizontalSignal --gpu 1 --batch-size 16  --save_csv_name 
ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset EOGVerticalSignal --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Earthquakes --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ElectricDevices --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset EthanolLevel --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FaceAll --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FaceFour --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FacesUCR --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FiftyWords --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Fish --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FordA --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FordB --gpu 1 
--batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FreezerRegularTrain --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset FreezerSmallTrain --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Fungi --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GestureMidAirD1 --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GestureMidAirD2 --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GestureMidAirD3 --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GesturePebbleZ1 --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GesturePebbleZ2 --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GunPoint --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GunPointAgeSpan --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot 
/SSD/lz/UCRArchive_2018 --normalize_way single --dataset GunPointMaleVersusFemale --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset GunPointOldVersusYoung --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Ham --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset HandOutlines --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Haptics --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Herring --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset HouseTwenty --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset InlineSkate --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset InsectEPGRegularTrain --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset InsectEPGSmallTrain --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset InsectWingbeatSound --gpu 1 --batch-size 16  
--save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ItalyPowerDemand --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset LargeKitchenAppliances --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Lightning2 --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Lightning7 --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Mallat --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Meat --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MedicalImages --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MelbournePedestrian --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MiddlePhalanxOutlineAgeGroup --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MiddlePhalanxOutlineCorrect --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot 
/SSD/lz/UCRArchive_2018 --normalize_way single --dataset MiddlePhalanxTW --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MixedShapesRegularTrain --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MixedShapesSmallTrain --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset MoteStrain --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset NonInvasiveFetalECGThorax1 --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset NonInvasiveFetalECGThorax2 --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset OSULeaf --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset OliveOil --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset PLAID --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset PhalangesOutlinesCorrect --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Phoneme --gpu 1 
--batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset PickupGestureWiimoteZ --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset PigAirwayPressure --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset PigArtPressure --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset PigCVP --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Plane --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset PowerCons --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ProximalPhalanxOutlineAgeGroup --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ProximalPhalanxOutlineCorrect --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ProximalPhalanxTW --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset RefrigerationDevices --gpu 1 --batch-size 16  --save_csv_name 
ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Rock --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ScreenType --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SemgHandGenderCh2 --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SemgHandMovementCh2 --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SemgHandSubjectCh2 --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ShakeGestureWiimoteZ --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ShapeletSim --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ShapesAll --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SmallKitchenAppliances --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SmoothSubspace --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 
--normalize_way single --dataset SonyAIBORobotSurface1 --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SonyAIBORobotSurface2 --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset StarLightCurves --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Strawberry --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SwedishLeaf --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Symbols --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset SyntheticControl --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ToeSegmentation1 --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset ToeSegmentation2 --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Trace --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset TwoLeadECG --gpu 1 --batch-size 16  --save_csv_name 
ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset TwoPatterns --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset UMD --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset UWaveGestureLibraryAll --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset UWaveGestureLibraryX --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset UWaveGestureLibraryY --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset UWaveGestureLibraryZ --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Wafer --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Wine --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset WordSynonyms --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Worms --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single 
--dataset WormsTwoClass --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\npython train_tsm.py --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataset Yoga --gpu 1 --batch-size 16  --save_csv_name ts2vec_tsm_train_val_b16_single_norm_0409_;\n"
  },
  {
    "path": "ts_classification_methods/ts2vec_cls/scripts/ts2vec_tsm_uea.sh",
    "content": "python train_tsm_uea.py --dataroot /SSD/lz/Multivariate2018_arff --dataset ArticularyWordRecognition --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_uea_0423_;\npython train_tsm_uea.py --dataroot /SSD/lz/Multivariate2018_arff --dataset AtrialFibrillation --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_uea_0423_;\npython train_tsm_uea.py --dataroot /SSD/lz/Multivariate2018_arff --dataset BasicMotions --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_uea_0423_;\npython train_tsm_uea.py --dataroot /SSD/lz/Multivariate2018_arff --dataset CharacterTrajectories --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_uea_0423_;\npython train_tsm_uea.py --dataroot /SSD/lz/Multivariate2018_arff --dataset Cricket --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_uea_0423_;\npython train_tsm_uea.py --dataroot /SSD/lz/Multivariate2018_arff --dataset DuckDuckGeese --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_uea_0423_;\npython train_tsm_uea.py --dataroot /SSD/lz/Multivariate2018_arff --dataset EigenWorms --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_uea_0423_;\npython train_tsm_uea.py --dataroot /SSD/lz/Multivariate2018_arff --dataset Epilepsy --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_uea_0423_;\npython train_tsm_uea.py --dataroot /SSD/lz/Multivariate2018_arff --dataset EthanolConcentration --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_uea_0423_;\npython train_tsm_uea.py --dataroot /SSD/lz/Multivariate2018_arff --dataset ERing --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_uea_0423_;\npython train_tsm_uea.py --dataroot /SSD/lz/Multivariate2018_arff --dataset FaceDetection --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_uea_0423_;\npython train_tsm_uea.py --dataroot /SSD/lz/Multivariate2018_arff --dataset FingerMovements --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_uea_0423_;\npython train_tsm_uea.py --dataroot /SSD/lz/Multivariate2018_arff --dataset HandMovementDirection --gpu 1 --batch-size 8  --save_csv_name 
ts2vec_tsm_uea_0423_;\npython train_tsm_uea.py --dataroot /SSD/lz/Multivariate2018_arff --dataset Handwriting --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_uea_0423_;\npython train_tsm_uea.py --dataroot /SSD/lz/Multivariate2018_arff --dataset Heartbeat --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_uea_0423_;\npython train_tsm_uea.py --dataroot /SSD/lz/Multivariate2018_arff --dataset InsectWingbeat --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_uea_0423_;\npython train_tsm_uea.py --dataroot /SSD/lz/Multivariate2018_arff --dataset JapaneseVowels --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_uea_0423_;\npython train_tsm_uea.py --dataroot /SSD/lz/Multivariate2018_arff --dataset Libras --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_uea_0423_;\npython train_tsm_uea.py --dataroot /SSD/lz/Multivariate2018_arff --dataset LSST --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_uea_0423_;\npython train_tsm_uea.py --dataroot /SSD/lz/Multivariate2018_arff --dataset MotorImagery --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_uea_0423_;\npython train_tsm_uea.py --dataroot /SSD/lz/Multivariate2018_arff --dataset NATOPS --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_uea_0423_;\npython train_tsm_uea.py --dataroot /SSD/lz/Multivariate2018_arff --dataset PenDigits --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_uea_0423_;\npython train_tsm_uea.py --dataroot /SSD/lz/Multivariate2018_arff --dataset PEMS-SF --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_uea_0423_;\npython train_tsm_uea.py --dataroot /SSD/lz/Multivariate2018_arff --dataset PhonemeSpectra --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_uea_0423_;\npython train_tsm_uea.py --dataroot /SSD/lz/Multivariate2018_arff --dataset RacketSports --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_uea_0423_;\npython train_tsm_uea.py --dataroot /SSD/lz/Multivariate2018_arff --dataset SelfRegulationSCP1 --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_uea_0423_;\npython train_tsm_uea.py 
--dataroot /SSD/lz/Multivariate2018_arff --dataset SelfRegulationSCP2 --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_uea_0423_;\npython train_tsm_uea.py --dataroot /SSD/lz/Multivariate2018_arff --dataset SpokenArabicDigits --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_uea_0423_;\npython train_tsm_uea.py --dataroot /SSD/lz/Multivariate2018_arff --dataset StandWalkJump --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_uea_0423_;\npython train_tsm_uea.py --dataroot /SSD/lz/Multivariate2018_arff --dataset UWaveGestureLibrary --gpu 1 --batch-size 8  --save_csv_name ts2vec_tsm_uea_0423_;\n"
  },
  {
    "path": "ts_classification_methods/ts2vec_cls/tasks/__init__.py",
    "content": "from .classification import eval_classification\n"
  },
  {
    "path": "ts_classification_methods/ts2vec_cls/tasks/_eval_protocols.py",
    "content": "import numpy as np\nfrom sklearn.linear_model import Ridge\nfrom sklearn.svm import SVC\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.model_selection import GridSearchCV, train_test_split\n\ndef fit_svm(features, y, MAX_SAMPLES=10000):\n    nb_classes = np.unique(y, return_counts=True)[1].shape[0]\n    train_size = features.shape[0]\n\n    svm = SVC(C=np.inf, gamma='scale')\n    if train_size // nb_classes < 5 or train_size < 50:\n        return svm.fit(features, y)\n    else:\n        grid_search = GridSearchCV(\n            svm, {\n                'C': [\n                    0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000,\n                    np.inf\n                ],\n                'kernel': ['rbf'],\n                'degree': [3],\n                'gamma': ['scale'],\n                'coef0': [0],\n                'shrinking': [True],\n                'probability': [False],\n                'tol': [0.001],\n                'cache_size': [200],\n                'class_weight': [None],\n                'verbose': [False],\n                'max_iter': [10000000],\n                'decision_function_shape': ['ovr'],\n                'random_state': [None]\n            },\n            cv=5, n_jobs=5\n        )\n        # If the training set is too large, subsample MAX_SAMPLES examples\n        if train_size > MAX_SAMPLES:\n            split = train_test_split(\n                features, y,\n                train_size=MAX_SAMPLES, random_state=0, stratify=y\n            )\n            features = split[0]\n            y = split[2]\n            \n        grid_search.fit(features, y)\n        return grid_search.best_estimator_\n\ndef fit_lr(features, y, MAX_SAMPLES=100000):\n    # If the training set is too large, subsample MAX_SAMPLES examples\n    if features.shape[0] > 
MAX_SAMPLES:\n        split = train_test_split(\n            features, y,\n            train_size=MAX_SAMPLES, random_state=0, stratify=y\n        )\n        features = split[0]\n        y = split[2]\n        \n    pipe = make_pipeline(\n        StandardScaler(),\n        LogisticRegression(\n            random_state=0,\n            max_iter=1000000,\n            multi_class='ovr'\n        )\n    )\n    pipe.fit(features, y)\n    return pipe\n\ndef fit_knn(features, y):\n    pipe = make_pipeline(\n        StandardScaler(),\n        KNeighborsClassifier(n_neighbors=1)\n    )\n    pipe.fit(features, y)\n    return pipe\n\ndef fit_ridge(train_features, train_y, valid_features, valid_y, MAX_SAMPLES=100000):\n    # If the training set is too large, subsample MAX_SAMPLES examples\n    if train_features.shape[0] > MAX_SAMPLES:\n        split = train_test_split(\n            train_features, train_y,\n            train_size=MAX_SAMPLES, random_state=0\n        )\n        train_features = split[0]\n        train_y = split[2]\n    if valid_features.shape[0] > MAX_SAMPLES:\n        split = train_test_split(\n            valid_features, valid_y,\n            train_size=MAX_SAMPLES, random_state=0\n        )\n        valid_features = split[0]\n        valid_y = split[2]\n    \n    alphas = [0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50, 100, 200, 500, 1000]\n    valid_results = []\n    for alpha in alphas:\n        lr = Ridge(alpha=alpha).fit(train_features, train_y)\n        valid_pred = lr.predict(valid_features)\n        score = np.sqrt(((valid_pred - valid_y) ** 2).mean()) + np.abs(valid_pred - valid_y).mean()\n        valid_results.append(score)\n    best_alpha = alphas[np.argmin(valid_results)]\n    \n    lr = Ridge(alpha=best_alpha)\n    lr.fit(train_features, train_y)\n    return lr\n"
  },
  {
    "path": "ts_classification_methods/ts2vec_cls/tasks/classification.py",
    "content": "import numpy as np\nfrom . import _eval_protocols as eval_protocols\nfrom sklearn.preprocessing import label_binarize\nfrom sklearn.metrics import average_precision_score\n\ndef eval_classification(model, train_data, train_labels, test_data, test_labels, eval_protocol='linear'):\n    assert train_labels.ndim == 1 or train_labels.ndim == 2\n    train_repr = model.encode(train_data, encoding_window='full_series' if train_labels.ndim == 1 else None)\n    test_repr = model.encode(test_data, encoding_window='full_series' if train_labels.ndim == 1 else None)\n\n    if eval_protocol == 'linear':\n        fit_clf = eval_protocols.fit_lr\n    elif eval_protocol == 'svm':\n        fit_clf = eval_protocols.fit_svm\n    elif eval_protocol == 'knn':\n        fit_clf = eval_protocols.fit_knn\n    else:\n        assert False, 'unknown evaluation protocol'\n\n    def merge_dim01(array):\n        return array.reshape(array.shape[0]*array.shape[1], *array.shape[2:])\n\n    if train_labels.ndim == 2:\n        train_repr = merge_dim01(train_repr)\n        train_labels = merge_dim01(train_labels)\n        test_repr = merge_dim01(test_repr)\n        test_labels = merge_dim01(test_labels)\n\n    clf = fit_clf(train_repr, train_labels)\n\n    acc = clf.score(test_repr, test_labels)\n    train_acc = clf.score(train_repr, train_labels)\n    if eval_protocol == 'linear':\n        y_score = clf.predict_proba(test_repr)\n    else:\n        y_score = clf.decision_function(test_repr)\n    test_labels_onehot = label_binarize(test_labels, classes=np.arange(train_labels.max()+1))\n    auprc = average_precision_score(test_labels_onehot, y_score)\n    \n    return y_score, {'acc': acc, 'auprc': auprc, 'train_acc': train_acc}\n"
  },
  {
    "path": "ts_classification_methods/ts2vec_cls/train.py",
    "content": "import torch\nimport numpy as np\nimport argparse\nimport os\nimport sys\nimport time\nimport datetime\nfrom ts2vec.ts2vec import TS2Vec\nfrom ts2vec import datautils, tasks\nfrom ts2vec.utils import init_dl_program, name_with_datetime, pkl_save, data_dropout\n\ndef save_checkpoint_callback(\n    save_every=1,\n    unit='epoch'\n):\n    assert unit in ('epoch', 'iter')\n    def callback(model, loss):\n        n = model.n_epochs if unit == 'epoch' else model.n_iters\n        if n % save_every == 0:\n            model.save(f'{run_dir}/model_{n}.pkl')\n    return callback\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('dataset', help='The dataset name')\n    parser.add_argument('run_name', help='The folder name used to save model, output and evaluation metrics. This can be set to any word')\n    parser.add_argument('--loader', type=str, required=True, help='The data loader used to load the experimental data. This can be set to UCR, UEA, forecast_csv, forecast_csv_univar, anomaly, or anomaly_coldstart')\n    parser.add_argument('--gpu', type=int, default=0, help='The gpu no. 
used for training and inference (defaults to 0)')\n    parser.add_argument('--batch-size', type=int, default=8, help='The batch size (defaults to 8)')\n    parser.add_argument('--lr', type=float, default=0.001, help='The learning rate (defaults to 0.001)')\n    parser.add_argument('--repr-dims', type=int, default=320, help='The representation dimension (defaults to 320)')\n    parser.add_argument('--max-train-length', type=int, default=3000, help='For sequence with a length greater than <max_train_length>, it would be cropped into some sequences, each of which has a length less than <max_train_length> (defaults to 3000)')\n    parser.add_argument('--iters', type=int, default=None, help='The number of iterations')\n    parser.add_argument('--epochs', type=int, default=None, help='The number of epochs')\n    parser.add_argument('--save-every', type=int, default=None, help='Save the checkpoint every <save_every> iterations/epochs')\n    parser.add_argument('--seed', type=int, default=None, help='The random seed')\n    parser.add_argument('--max-threads', type=int, default=None, help='The maximum allowed number of threads used by this process')\n    parser.add_argument('--eval', action=\"store_true\", help='Whether to perform evaluation after training')\n    parser.add_argument('--irregular', type=float, default=0, help='The ratio of missing observations (defaults to 0)')\n    args = parser.parse_args()\n    \n    print(\"Dataset:\", args.dataset)\n    print(\"Arguments:\", str(args))\n    \n    device = init_dl_program(args.gpu, seed=args.seed, max_threads=args.max_threads)\n    \n    print('Loading data... 
', end='')\n    if args.loader == 'UCR':\n        task_type = 'classification'\n        train_data, train_labels, test_data, test_labels = datautils.load_UCR(args.dataset)\n        \n    elif args.loader == 'UEA':\n        task_type = 'classification'\n        train_data, train_labels, test_data, test_labels = datautils.load_UEA(args.dataset)\n        \n    elif args.loader == 'forecast_csv':\n        task_type = 'forecasting'\n        data, train_slice, valid_slice, test_slice, scaler, pred_lens, n_covariate_cols = datautils.load_forecast_csv(args.dataset)\n        train_data = data[:, train_slice]\n        \n    elif args.loader == 'forecast_csv_univar':\n        task_type = 'forecasting'\n        data, train_slice, valid_slice, test_slice, scaler, pred_lens, n_covariate_cols = datautils.load_forecast_csv(args.dataset, univar=True)\n        train_data = data[:, train_slice]\n        \n    elif args.loader == 'forecast_npy':\n        task_type = 'forecasting'\n        data, train_slice, valid_slice, test_slice, scaler, pred_lens, n_covariate_cols = datautils.load_forecast_npy(args.dataset)\n        train_data = data[:, train_slice]\n        \n    elif args.loader == 'forecast_npy_univar':\n        task_type = 'forecasting'\n        data, train_slice, valid_slice, test_slice, scaler, pred_lens, n_covariate_cols = datautils.load_forecast_npy(args.dataset, univar=True)\n        train_data = data[:, train_slice]\n        \n    elif args.loader == 'anomaly':\n        task_type = 'anomaly_detection'\n        all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay = datautils.load_anomaly(args.dataset)\n        train_data = datautils.gen_ano_train_data(all_train_data)\n        \n    elif args.loader == 'anomaly_coldstart':\n        task_type = 'anomaly_detection_coldstart'\n        all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay = 
datautils.load_anomaly(args.dataset)\n        train_data, _, _, _ = datautils.load_UCR('FordA')\n        \n    else:\n        raise ValueError(f\"Unknown loader {args.loader}.\")\n        \n        \n    if args.irregular > 0:\n        if task_type == 'classification':\n            train_data = data_dropout(train_data, args.irregular)\n            test_data = data_dropout(test_data, args.irregular)\n        else:\n            raise ValueError(f\"Task type {task_type} is not supported when irregular>0.\")\n    print('done')\n    \n    config = dict(\n        batch_size=args.batch_size,\n        lr=args.lr,\n        output_dims=args.repr_dims,\n        max_train_length=args.max_train_length\n    )\n    \n    if args.save_every is not None:\n        unit = 'epoch' if args.epochs is not None else 'iter'\n        config[f'after_{unit}_callback'] = save_checkpoint_callback(args.save_every, unit)\n\n    run_dir = 'training/' + args.dataset + '__' + name_with_datetime(args.run_name)\n    os.makedirs(run_dir, exist_ok=True)\n    \n    t = time.time()\n    \n    model = TS2Vec(\n        input_dims=train_data.shape[-1],\n        device=device,\n        **config\n    )\n    loss_log = model.fit(\n        train_data,\n        n_epochs=args.epochs,\n        n_iters=args.iters,\n        verbose=True\n    )\n    model.save(f'{run_dir}/model.pkl')\n\n    t = time.time() - t\n    print(f\"\\nTraining time: {datetime.timedelta(seconds=t)}\\n\")\n\n    if args.eval:\n        if task_type == 'classification':\n            out, eval_res = tasks.eval_classification(model, train_data, train_labels, test_data, test_labels, eval_protocol='svm')\n        elif task_type == 'forecasting':\n            out, eval_res = tasks.eval_forecasting(model, data, train_slice, valid_slice, test_slice, scaler, pred_lens, n_covariate_cols)\n        elif task_type == 'anomaly_detection':\n            out, eval_res = tasks.eval_anomaly_detection(model, all_train_data, all_train_labels, all_train_timestamps, 
all_test_data, all_test_labels, all_test_timestamps, delay)\n        elif task_type == 'anomaly_detection_coldstart':\n            out, eval_res = tasks.eval_anomaly_detection_coldstart(model, all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay)\n        else:\n            assert False\n        pkl_save(f'{run_dir}/out.pkl', out)\n        pkl_save(f'{run_dir}/eval_res.pkl', eval_res)\n        print('Evaluation result:', eval_res)\n\n    print(\"Finished.\")\n"
  },
  {
    "path": "ts_classification_methods/ts2vec_cls/train_fcn.py",
    "content": "import argparse\nimport datetime\nimport os\nimport sys\nimport time\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader\n\nfrom data.dataloader import UCRDataset\nfrom data.preprocessing import normalize_per_series, fill_nan_value, normalize_train_val_test\nfrom ts2vec_cls.ts2vec import TS2Vec\nfrom ts2vec_cls.utils import init_dl_program, name_with_datetime\nfrom tsm_utils import build_dataset, build_model, build_loss, evaluate, save_cls_result, get_all_datasets, set_seed\n\n\ndef save_checkpoint_callback(\n        save_every=1,\n        unit='epoch'\n):\n    assert unit in ('epoch', 'iter')\n\n    def callback(model, loss):\n        n = model.n_epochs if unit == 'epoch' else model.n_iters\n        if n % save_every == 0:\n            model.save(f'{run_dir}/model_{n}.pkl')\n\n    return callback\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--dataset', type=str, default='Coffee', help='The dataset name')\n    parser.add_argument('--dataroot', type=str, default='/SSD/lz/UCRArchive_2018',\n                        help='path of UCR folder')  ## '/SSD/lz/UCRArchive_2018', None\n    parser.add_argument('--num_classes', type=int, default=0, help='number of class')\n    parser.add_argument('--task', type=str, default='classification', help='classification or reconstruction')\n    parser.add_argument('--classifier_input', type=int, default=128, help='input dim of the classifiers')\n    parser.add_argument('--loss', type=str, default='cross_entropy', help='loss function')\n    parser.add_argument('--weight_decay', type=float, default=0.0, help='weight decay')\n    parser.add_argument('--backbone', type=str, default='fcn', help='encoder backbone, fcn or dilated')\n    parser.add_argument('--classifier', type=str, default='nonlinear', help='type of 
classifier(linear or nonlinear)')\n    parser.add_argument('--run_name', default='UCR',\n                        help='The folder name used to save model, output and evaluation metrics. This can be set to any word')\n    parser.add_argument('--loader', type=str, default='UCR',\n                        help='The data loader used to load the experimental data. This can be set to UCR, UEA, '\n                             'forecast_csv, forecast_csv_univar, anomaly, or anomaly_coldstart')\n    parser.add_argument('--gpu', type=int, default=1,\n                        help='The gpu no. used for training and inference (defaults to 0)')\n    parser.add_argument('--batch-size', type=int, default=8, help='The batch size (defaults to 8)')\n    parser.add_argument('--fcn_batch_size', type=int, default=128,\n                        help='(16, 128) larger batch size on the big dataset, ')  # 16\n    parser.add_argument('--lr', type=float, default=0.001, help='The learning rate (defaults to 0.001)')\n    parser.add_argument('--repr-dims', type=int, default=320, help='The representation dimension (defaults to 320)')\n    parser.add_argument('--max-train-length', type=int, default=3000,\n                        help='For sequence with a length greater than <max_train_length>, it would be cropped '\n                             'into some sequences, each of which has a length less than <max_train_length> (defaults to 3000)')\n    parser.add_argument('--iters', type=int, default=None, help='The number of iterations')\n    parser.add_argument('--epochs', type=int, default=None, help='The number of epochs')\n    parser.add_argument('--fcn_epoch', type=int, default=1000, help='fcn training epoch')\n    parser.add_argument('--save-every', type=int, default=None,\n                        help='Save the checkpoint every <save_every> iterations/epochs')\n    parser.add_argument('--seed', type=int, default=42, help='The random seed')\n    parser.add_argument('--random_seed', type=int, 
default=42, help='The random seed')\n    parser.add_argument('--max-threads', type=int, default=8,\n                        help='The maximum allowed number of threads used by this process')\n    parser.add_argument('--eval', action=\"store_true\", default=True,\n                        help='Whether to perform evaluation after training')\n    parser.add_argument('--irregular', type=float, default=0, help='The ratio of missing observations (defaults to 0)')\n    parser.add_argument('--normalize_way', type=str, default='single', help='single or train_set')\n    parser.add_argument('--save_csv_name', type=str, default='ts2vec_test_fcncls_0404_')\n    parser.add_argument('--save_dir', type=str, default='/SSD/lz/time_tsm/ts2vec_cls/result')\n    parser.add_argument('--cuda', type=str, default='cuda:1')\n\n    args = parser.parse_args()\n    set_seed(args)\n\n    print(\"Dataset:\", args.dataset)\n    print(\"Arguments:\", str(args))\n\n    device = init_dl_program(args.gpu, seed=args.seed, max_threads=args.max_threads)\n    device_fcn = torch.device(args.cuda if torch.cuda.is_available() else \"cpu\")\n\n    print('Loading data... 
', end='')\n\n    config = dict(\n        batch_size=args.batch_size,\n        lr=args.lr,\n        output_dims=args.repr_dims,\n        max_train_length=args.max_train_length\n    )\n\n    if args.save_every is not None:\n        unit = 'epoch' if args.epochs is not None else 'iter'\n        config[f'after_{unit}_callback'] = save_checkpoint_callback(args.save_every, unit)\n\n    run_dir = 'training/' + args.dataset + '__' + name_with_datetime(args.run_name)\n    os.makedirs(run_dir, exist_ok=True)\n\n    sum_dataset, sum_target, num_classes = build_dataset(args)\n    args.num_classes = num_classes\n    if sum_dataset.shape[0] < args.fcn_batch_size:\n        args.fcn_batch_size = 16\n\n    train_datasets, train_targets, val_datasets, val_targets, test_datasets, test_targets = get_all_datasets(\n        sum_dataset, sum_target)\n\n    test_accuracies = []\n    end_val_epochs = []\n    train_time = 0.0\n    for i, train_dataset in enumerate(train_datasets):\n        print(\"\\nStart K_fold = \", i)\n        train_labels = train_targets[i]\n\n        val_dataset = val_datasets[i]\n        val_labels = val_targets[i]\n\n        test_dataset = test_datasets[i]\n        test_labels = test_targets[i]\n\n        # mean impute for missing values in dataset\n        train_dataset, val_dataset, test_dataset = fill_nan_value(train_dataset, val_dataset, test_dataset)\n\n        if args.normalize_way == 'single':\n            # TODO normalize per series\n            train_dataset = normalize_per_series(train_dataset)\n            val_dataset = normalize_per_series(val_dataset)\n            test_dataset = normalize_per_series(test_dataset)\n        else:\n            train_dataset, val_dataset, test_dataset = normalize_train_val_test(train_dataset, val_dataset,\n                                                                                test_dataset)\n\n        train_dataset = train_dataset[..., np.newaxis]\n        val_dataset = val_dataset[..., np.newaxis]\n        
test_dataset = test_dataset[..., np.newaxis]\n\n        print(\"train_data.shape = \", train_dataset.shape)\n\n        t = time.time()\n\n        model = TS2Vec(\n            input_dims=train_dataset.shape[-1],\n            device=device,\n            **config\n        )\n        loss_log = model.fit(\n            train_dataset,\n            n_epochs=args.epochs,\n            n_iters=args.iters,\n            verbose=True\n        )\n        # model.save(f'{run_dir}/model.pkl')\n        train_repr = model.encode(train_dataset, encoding_window='full_series' if train_labels.ndim == 1 else None)\n        val_repr = model.encode(val_dataset, encoding_window='full_series' if val_labels.ndim == 1 else None)\n        test_repr = model.encode(test_dataset, encoding_window='full_series' if test_labels.ndim == 1 else None)\n        # accu = test_accu.cpu().numpy()\n        print(\"data info = \", train_repr.shape, test_repr.shape, train_dataset.shape, test_dataset.shape)\n        # print(type(train_repr), train_repr[:2])\n        model_fcn, classifier = build_model(args)\n        model_fcn, classifier = model_fcn.to(device_fcn), classifier.to(device_fcn)\n        loss = build_loss(args).to(device_fcn)\n        optimizer = torch.optim.Adam([{'params': model_fcn.parameters()}, {'params': classifier.parameters()}],\n                                     lr=args.lr, weight_decay=args.weight_decay)\n\n        train_set = UCRDataset(torch.from_numpy(train_repr).to(device_fcn),\n                               torch.from_numpy(train_labels).to(device_fcn).to(torch.int64))\n        val_set = UCRDataset(torch.from_numpy(val_repr).to(device_fcn),\n                             torch.from_numpy(val_labels).to(device_fcn).to(torch.int64))\n        test_set = UCRDataset(torch.from_numpy(test_repr).to(device_fcn),\n                              torch.from_numpy(test_labels).to(device_fcn).to(torch.int64))\n\n        train_loader = DataLoader(train_set, batch_size=args.fcn_batch_size, 
num_workers=0, drop_last=True)\n        val_loader = DataLoader(val_set, batch_size=args.fcn_batch_size, num_workers=0)\n        test_loader = DataLoader(test_set, batch_size=args.fcn_batch_size, num_workers=0)\n\n        train_loss = []\n        train_accuracy = []\n        num_steps = args.fcn_epoch // args.batch_size\n\n        last_loss = float('inf')\n        stop_count = 0\n        increase_count = 0\n\n        test_accuracy = 0\n        min_val_loss = float('inf')\n        end_val_epoch = 0\n\n        num_steps = train_set.__len__() // args.batch_size\n        for epoch in range(args.fcn_epoch):\n            # early stopping in finetune\n            if stop_count == 50 or increase_count == 50:\n                print('model convergent at epoch {}, early stopping'.format(epoch))\n                break\n\n            epoch_train_loss = 0\n            epoch_train_acc = 0\n            model_fcn.train()\n            classifier.train()\n            for x, y in train_loader:\n                optimizer.zero_grad()\n                pred = model_fcn(x)\n                pred = classifier(pred)\n\n                step_loss = loss(pred, y)\n                step_loss.backward()\n                optimizer.step()\n\n                epoch_train_loss += step_loss.item()\n                epoch_train_acc += torch.sum(torch.argmax(pred.data, axis=1) == y) / len(y)\n\n            epoch_train_loss /= num_steps\n            epoch_train_acc /= num_steps\n\n            model_fcn.eval()\n            classifier.eval()\n            val_loss, val_accu = evaluate(val_loader, model_fcn, classifier, loss, device_fcn)\n            if min_val_loss > val_loss:\n                min_val_loss = val_loss\n                end_val_epoch = epoch\n                test_loss, test_accuracy = evaluate(test_loader, model_fcn, classifier, loss, device_fcn)\n\n            if epoch % 100 == 0:\n                print(\n                    \"epoch : {}, train loss: {} , train accuracy : {}, \\nval loss : {}, 
val accuracy : {}, \\ntest loss : {}, test accuracy : {}\".format(\n                        epoch, epoch_train_loss, epoch_train_acc, val_loss, val_accu, test_loss, test_accuracy))\n\n            if abs(last_loss - val_loss) <= 1e-4:\n                stop_count += 1\n            else:\n                stop_count = 0\n\n            if val_loss > last_loss:\n                increase_count += 1\n            else:\n                increase_count = 0\n\n            last_loss = val_loss\n\n        # out, eval_res = tasks.eval_classification(model, train_dataset, train_labels, test_dataset, test_labels,\n        #                                           eval_protocol='svm')\n        t = time.time() - t\n        print(f\"\\nTraining time: {datetime.timedelta(seconds=t)}\\n\")\n        train_time += t\n\n        # print('Evaluation result:', eval_res)\n        # test_accuracies.append(eval_res['acc'])\n        test_accuracies.append(test_accuracy)\n        end_val_epochs.append(end_val_epoch)\n\n    test_accuracies = torch.Tensor(test_accuracies)\n    end_val_epochs = np.array(end_val_epochs)\n    save_cls_result(args, test_accu=torch.mean(test_accuracies), test_std=torch.std(test_accuracies),\n                    train_time=train_time / 5, end_val_epoch=np.mean(end_val_epochs))\n\n    print(\"Finished.\")\n"
  },
  {
    "path": "ts_classification_methods/ts2vec_cls/train_tsm.py",
    "content": "import argparse\nimport datetime\nimport os\nimport sys\nimport time\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\nimport numpy as np\nimport torch\n\nfrom data.preprocessing import normalize_per_series, fill_nan_value, normalize_train_val_test\nfrom ts2vec_cls import tasks\nfrom ts2vec_cls.ts2vec import TS2Vec\nfrom ts2vec_cls.utils import init_dl_program, name_with_datetime\nfrom tsm_utils import build_dataset, save_cls_result, get_all_datasets, set_seed\n\n\ndef save_checkpoint_callback(\n        save_every=1,\n        unit='epoch'\n):\n    assert unit in ('epoch', 'iter')\n\n    def callback(model, loss):\n        n = model.n_epochs if unit == 'epoch' else model.n_iters\n        if n % save_every == 0:\n            model.save(f'{run_dir}/model_{n}.pkl')\n\n    return callback\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--dataset', type=str, default='Coffee', help='The dataset name')\n    parser.add_argument('--dataroot', type=str, default='/SSD/lz/UCRArchive_2018',\n                        help='path of UCR folder')  ## '/SSD/lz/UCRArchive_2018', None\n    parser.add_argument('--run_name', default='UCR',\n                        help='The folder name used to save model, output and evaluation metrics. This can be set to any word')\n    parser.add_argument('--loader', type=str, default='UCR',\n                        help='The data loader used to load the experimental data. This can be set to UCR, UEA, '\n                             'forecast_csv, forecast_csv_univar, anomaly, or anomaly_coldstart')\n    parser.add_argument('--gpu', type=int, default=1,\n                        help='The gpu no. 
used for training and inference (defaults to 0)')\n    parser.add_argument('--batch-size', type=int, default=8, help='The batch size (defaults to 8)')\n    parser.add_argument('--lr', type=float, default=0.001, help='The learning rate (defaults to 0.001)')\n    parser.add_argument('--repr-dims', type=int, default=320, help='The representation dimension (defaults to 320)')\n    parser.add_argument('--max-train-length', type=int, default=3000,\n                        help='For sequence with a length greater than <max_train_length>, it would be cropped '\n                             'into some sequences, each of which has a length less than <max_train_length> (defaults to 3000)')\n    parser.add_argument('--iters', type=int, default=None, help='The number of iterations')\n    parser.add_argument('--epochs', type=int, default=None, help='The number of epochs')\n    parser.add_argument('--save-every', type=int, default=None,\n                        help='Save the checkpoint every <save_every> iterations/epochs')\n    parser.add_argument('--seed', type=int, default=42, help='The random seed')\n    parser.add_argument('--random_seed', type=int, default=42, help='The random seed')\n    parser.add_argument('--max-threads', type=int, default=8,\n                        help='The maximum allowed number of threads used by this process')\n    parser.add_argument('--eval', action=\"store_true\", default=True,\n                        help='Whether to perform evaluation after training')\n    parser.add_argument('--irregular', type=float, default=0, help='The ratio of missing observations (defaults to 0)')\n    parser.add_argument('--normalize_way', type=str, default='single', help='single or train_set')\n    parser.add_argument('--save_csv_name', type=str, default='ts2vec_test_cls_0409_')\n    parser.add_argument('--save_dir', type=str, default='/SSD/lz/time_tsm/ts2vec_cls/result')\n    args = parser.parse_args()\n    set_seed(args)\n\n    print(\"Dataset:\", args.dataset)\n   
 print(\"Arguments:\", str(args))\n\n    device = init_dl_program(args.gpu, seed=args.seed, max_threads=args.max_threads)\n\n    print('Loading data... ', end='')\n\n    config = dict(\n        batch_size=args.batch_size,\n        lr=args.lr,\n        output_dims=args.repr_dims,\n        max_train_length=args.max_train_length\n    )\n\n    if args.save_every is not None:\n        unit = 'epoch' if args.epochs is not None else 'iter'\n        config[f'after_{unit}_callback'] = save_checkpoint_callback(args.save_every, unit)\n\n    run_dir = 'training/' + args.dataset + '__' + name_with_datetime(args.run_name)\n    os.makedirs(run_dir, exist_ok=True)\n\n    sum_dataset, sum_target, num_classes = build_dataset(args)\n\n    train_datasets, train_targets, val_datasets, val_targets, test_datasets, test_targets = get_all_datasets(\n        sum_dataset, sum_target)\n\n    test_accuracies = []\n    train_time = 0.0\n    for i, train_dataset in enumerate(train_datasets):\n        print(\"\\nStart K_fold = \", i)\n        train_labels = train_targets[i]\n\n        val_dataset = val_datasets[i]\n        val_labels = val_targets[i]\n\n        test_dataset = test_datasets[i]\n        test_labels = test_targets[i]\n\n        # mean impute for missing values in dataset\n        train_dataset, val_dataset, test_dataset = fill_nan_value(train_dataset, val_dataset, test_dataset)\n\n        if args.normalize_way == 'single':\n            # TODO normalize per series\n            train_dataset = normalize_per_series(train_dataset)\n            val_dataset = normalize_per_series(val_dataset)\n            test_dataset = normalize_per_series(test_dataset)\n        else:\n            train_dataset, val_dataset, test_dataset = normalize_train_val_test(train_dataset, val_dataset,\n                                                                                test_dataset)\n\n        train_dataset = train_dataset[..., np.newaxis]\n        val_dataset = val_dataset[..., np.newaxis]\n        
test_dataset = test_dataset[..., np.newaxis]\n\n        # print(type(train_dataset))\n        train_val_dataset = np.concatenate((train_dataset, val_dataset))\n        train_val_labels = np.concatenate((train_labels, val_labels))\n        # print(train_labels.shape, val_labels.shape)\n        # print(\"train, val train_val_data.shape = \", train_dataset.shape, val_dataset.shape, train_val_dataset.shape, train_val_labels.shape)\n\n        t = time.time()\n\n        model = TS2Vec(\n            input_dims=train_dataset.shape[-1],\n            device=device,\n            **config\n        )\n        loss_log = model.fit(\n            train_dataset,\n            n_epochs=args.epochs,\n            n_iters=args.iters,\n            verbose=True\n        )\n        # model.save(f'{run_dir}/model.pkl')\n        ## evalution on test_dataset,\n        out, eval_res = tasks.eval_classification(model, train_val_dataset, train_val_labels, test_dataset, test_labels,\n                                                  eval_protocol='svm')\n        t = time.time() - t\n        print(f\"\\nTraining time: {datetime.timedelta(seconds=t)}\\n\")\n        train_time += t\n\n        print('Evaluation result:', eval_res)\n        test_accuracies.append(eval_res['acc'])\n\n    test_accuracies = torch.Tensor(test_accuracies)\n    save_cls_result(args, test_accu=torch.mean(test_accuracies), test_std=torch.std(test_accuracies),\n                    train_time=train_time / 5, end_val_epoch=0.00, seeds=args.random_seed)\n\n    print(\"Finished.\")\n"
  },
  {
    "path": "ts_classification_methods/ts2vec_cls/train_tsm_uea.py",
    "content": "import argparse\nimport datetime\nimport os\nimport sys\nimport time\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\nimport numpy as np\nimport torch\n\nfrom data.preprocessing import normalize_train_val_test\nfrom data.preprocessing import load_UEA, fill_nan_value, normalize_uea_set\nfrom ts2vec_cls import tasks\nfrom ts2vec_cls.ts2vec import TS2Vec\nfrom ts2vec_cls.utils import init_dl_program, name_with_datetime\nfrom tsm_utils import save_cls_result, get_all_datasets, set_seed\n\n\ndef save_checkpoint_callback(\n        save_every=1,\n        unit='epoch'\n):\n    assert unit in ('epoch', 'iter')\n\n    def callback(model, loss):\n        n = model.n_epochs if unit == 'epoch' else model.n_iters\n        if n % save_every == 0:\n            model.save(f'{run_dir}/model_{n}.pkl')\n\n    return callback\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--dataset', type=str, default='BasicMotions', help='The dataset name')\n    parser.add_argument('--dataroot', type=str, default='/SSD/lz/Multivariate2018_arff',\n                        help='path of UEA folder')\n    parser.add_argument('--run_name', default='UCR',\n                        help='The folder name used to save model, output and evaluation metrics. This can be set to any word')\n    parser.add_argument('--loader', type=str, default='UCR',\n                        help='The data loader used to load the experimental data. This can be set to UCR, UEA, '\n                             'forecast_csv, forecast_csv_univar, anomaly, or anomaly_coldstart')\n    parser.add_argument('--gpu', type=int, default=1,\n                        help='The gpu no. 
used for training and inference (defaults to 0)')\n    parser.add_argument('--batch-size', type=int, default=8, help='The batch size (defaults to 8)')\n    parser.add_argument('--lr', type=float, default=0.001, help='The learning rate (defaults to 0.001)')\n    parser.add_argument('--repr-dims', type=int, default=320, help='The representation dimension (defaults to 320)')\n    parser.add_argument('--max-train-length', type=int, default=3000,\n                        help='For sequence with a length greater than <max_train_length>, it would be cropped '\n                             'into some sequences, each of which has a length less than <max_train_length> (defaults to 3000)')\n    parser.add_argument('--iters', type=int, default=None, help='The number of iterations')\n    parser.add_argument('--epochs', type=int, default=None, help='The number of epochs')\n    parser.add_argument('--save-every', type=int, default=None,\n                        help='Save the checkpoint every <save_every> iterations/epochs')\n    parser.add_argument('--seed', type=int, default=42, help='The random seed')\n    parser.add_argument('--random_seed', type=int, default=42, help='The random seed')\n    parser.add_argument('--max-threads', type=int, default=8,\n                        help='The maximum allowed number of threads used by this process')\n    parser.add_argument('--eval', action=\"store_true\", default=True,\n                        help='Whether to perform evaluation after training')\n    parser.add_argument('--irregular', type=float, default=0, help='The ratio of missing observations (defaults to 0)')\n    parser.add_argument('--normalize_way', type=str, default='single', help='single or train_set')\n    parser.add_argument('--save_csv_name', type=str, default='ts2vec_test_uea_0423_')\n    parser.add_argument('--save_dir', type=str, default='/SSD/lz/time_tsm/ts2vec_cls/result')\n    args = parser.parse_args()\n    set_seed(args)\n\n    print(\"Dataset:\", args.dataset)\n   
 print(\"Arguments:\", str(args))\n\n    device = init_dl_program(args.gpu, seed=args.seed, max_threads=args.max_threads)\n\n    print('Loading data... ', end='')\n\n    config = dict(\n        batch_size=args.batch_size,\n        lr=args.lr,\n        output_dims=args.repr_dims,\n        max_train_length=args.max_train_length\n    )\n\n    if args.save_every is not None:\n        unit = 'epoch' if args.epochs is not None else 'iter'\n        config[f'after_{unit}_callback'] = save_checkpoint_callback(args.save_every, unit)\n\n    run_dir = 'training/' + args.dataset + '__' + name_with_datetime(args.run_name)\n    os.makedirs(run_dir, exist_ok=True)\n\n    # sum_dataset, sum_target, num_classes = build_dataset(args)\n    sum_dataset, sum_target, num_classes = load_UEA(\n        dataroot=args.dataroot,\n        dataset=args.dataset)\n\n    train_datasets, train_targets, val_datasets, val_targets, test_datasets, test_targets = get_all_datasets(\n        sum_dataset, sum_target)\n\n    test_accuracies = []\n    train_time = 0.0\n    for i, train_dataset in enumerate(train_datasets):\n        print(\"\\nStart K_fold = \", i)\n        train_labels = train_targets[i]\n\n        val_dataset = val_datasets[i]\n        val_labels = val_targets[i]\n\n        test_dataset = test_datasets[i]\n        test_labels = test_targets[i]\n\n        # mean impute for missing values in dataset\n        train_dataset, val_dataset, test_dataset = fill_nan_value(train_dataset, val_dataset, test_dataset)\n\n        if args.normalize_way == 'single':\n            train_dataset = normalize_uea_set(train_dataset)\n            val_dataset = normalize_uea_set(val_dataset)\n            test_dataset = normalize_uea_set(test_dataset)\n        else:\n            train_dataset, val_dataset, test_dataset = normalize_train_val_test(train_dataset, val_dataset,\n                                                                                test_dataset)\n\n        # train_dataset = train_dataset[..., 
np.newaxis]\n        # val_dataset = val_dataset[..., np.newaxis]\n        # test_dataset = test_dataset[..., np.newaxis]\n\n        # print(type(train_dataset))\n        train_val_dataset = np.concatenate((train_dataset, val_dataset))\n        train_val_labels = np.concatenate((train_labels, val_labels))\n        # print(train_labels.shape, val_labels.shape)\n        # print(\"train, val train_val_data.shape = \", train_dataset.shape, val_dataset.shape, train_val_dataset.shape, train_val_labels.shape)\n\n        t = time.time()\n\n        model = TS2Vec(\n            input_dims=train_dataset.shape[-1],\n            device=device,\n            **config\n        )\n        loss_log = model.fit(\n            train_dataset,\n            n_epochs=args.epochs,\n            n_iters=args.iters,\n            verbose=True\n        )\n        # model.save(f'{run_dir}/model.pkl')\n        ## evalution on test_dataset,\n        out, eval_res = tasks.eval_classification(model, train_val_dataset, train_val_labels, test_dataset, test_labels,\n                                                  eval_protocol='svm')\n        t = time.time() - t\n        print(f\"\\nTraining time: {datetime.timedelta(seconds=t)}\\n\")\n        train_time += t\n\n        print('Evaluation result:', eval_res)\n        test_accuracies.append(eval_res['acc'])\n\n    test_accuracies = torch.Tensor(test_accuracies)\n    save_cls_result(args, test_accu=torch.mean(test_accuracies), test_std=torch.std(test_accuracies),\n                    train_time=train_time / 5, end_val_epoch=0.00, seeds=args.random_seed)\n\n    print(\"Finished.\")\n"
  },
  {
    "path": "ts_classification_methods/ts2vec_cls/ts2vec.py",
    "content": "import torch\nimport torch.nn.functional as F\nfrom torch.utils.data import TensorDataset, DataLoader\nimport numpy as np\nfrom models import TSEncoder\nfrom models.losses import hierarchical_contrastive_loss\nfrom utils import take_per_row, split_with_nan, centerize_vary_length_series, torch_pad_nan\nimport math\n\nclass TS2Vec:\n    '''The TS2Vec model'''\n    \n    def __init__(\n        self,\n        input_dims,\n        output_dims=320,\n        hidden_dims=64,\n        depth=10,\n        device='cuda',\n        lr=0.001,\n        batch_size=16,\n        max_train_length=None,\n        temporal_unit=0,\n        after_iter_callback=None,\n        after_epoch_callback=None\n    ):\n        ''' Initialize a TS2Vec model.\n        \n        Args:\n            input_dims (int): The input dimension. For a univariate time series, this should be set to 1.\n            output_dims (int): The representation dimension.\n            hidden_dims (int): The hidden dimension of the encoder.\n            depth (int): The number of hidden residual blocks in the encoder.\n            device (int): The gpu used for training and inference.\n            lr (int): The learning rate.\n            batch_size (int): The batch size.\n            max_train_length (Union[int, NoneType]): The maximum allowed sequence length for training. For sequence with a length greater than <max_train_length>, it would be cropped into some sequences, each of which has a length less than <max_train_length>.\n            temporal_unit (int): The minimum unit to perform temporal contrast. 
When training on a very long sequence, this param helps to reduce the cost of time and memory.\n            after_iter_callback (Union[Callable, NoneType]): A callback function that would be called after each iteration.\n            after_epoch_callback (Union[Callable, NoneType]): A callback function that would be called after each epoch.\n        '''\n        \n        super().__init__()\n        self.device = device\n        self.lr = lr\n        self.batch_size = batch_size\n        self.max_train_length = max_train_length\n        self.temporal_unit = temporal_unit\n        \n        self._net = TSEncoder(input_dims=input_dims, output_dims=output_dims, hidden_dims=hidden_dims, depth=depth).to(self.device)\n        self.net = torch.optim.swa_utils.AveragedModel(self._net)\n        self.net.update_parameters(self._net)\n        \n        self.after_iter_callback = after_iter_callback\n        self.after_epoch_callback = after_epoch_callback\n        \n        self.n_epochs = 0\n        self.n_iters = 0\n    \n    def fit(self, train_data, n_epochs=None, n_iters=None, verbose=False):\n        ''' Training the TS2Vec model.\n        \n        Args:\n            train_data (numpy.ndarray): The training data. It should have a shape of (n_instance, n_timestamps, n_features). All missing data should be set to NaN.\n            n_epochs (Union[int, NoneType]): The number of epochs. When this reaches, the training stops.\n            n_iters (Union[int, NoneType]): The number of iterations. When this reaches, the training stops. 
If both n_epochs and n_iters are not specified, a default setting would be used that sets n_iters to 200 for a dataset with size <= 100000, 600 otherwise.\n            verbose (bool): Whether to print the training loss after each epoch.\n            \n        Returns:\n            loss_log: a list containing the training losses on each epoch.\n        '''\n        assert train_data.ndim == 3\n        \n        if n_iters is None and n_epochs is None:\n            n_iters = 200 if train_data.size <= 100000 else 600  # default param for n_iters\n        \n        if self.max_train_length is not None:\n            sections = train_data.shape[1] // self.max_train_length\n            if sections >= 2:\n                train_data = np.concatenate(split_with_nan(train_data, sections, axis=1), axis=0)\n\n        temporal_missing = np.isnan(train_data).all(axis=-1).any(axis=0)\n        if temporal_missing[0] or temporal_missing[-1]:\n            train_data = centerize_vary_length_series(train_data)\n                \n        train_data = train_data[~np.isnan(train_data).all(axis=2).all(axis=1)]\n        \n        train_dataset = TensorDataset(torch.from_numpy(train_data).to(torch.float))\n        train_loader = DataLoader(train_dataset, batch_size=min(self.batch_size, len(train_dataset)), shuffle=True, drop_last=True)\n        \n        optimizer = torch.optim.AdamW(self._net.parameters(), lr=self.lr)\n        \n        loss_log = []\n        \n        while True:\n            if n_epochs is not None and self.n_epochs >= n_epochs:\n                break\n            \n            cum_loss = 0\n            n_epoch_iters = 0\n            \n            interrupted = False\n\n            for batch in train_loader:\n                # print(\"self.n_iters = \", self.n_iters, \", n_iters = \", n_iters)\n                if n_iters is not None and self.n_iters >= n_iters:\n                    interrupted = True\n                    break\n                \n                x = 
batch[0]\n                if self.max_train_length is not None and x.size(1) > self.max_train_length:\n                    window_offset = np.random.randint(x.size(1) - self.max_train_length + 1)\n                    x = x[:, window_offset : window_offset + self.max_train_length]\n                x = x.to(self.device)\n                \n                ts_l = x.size(1)\n                crop_l = np.random.randint(low=2 ** (self.temporal_unit + 1), high=ts_l+1)\n                crop_left = np.random.randint(ts_l - crop_l + 1)\n                crop_right = crop_left + crop_l\n                crop_eleft = np.random.randint(crop_left + 1)\n                crop_eright = np.random.randint(low=crop_right, high=ts_l + 1)\n                crop_offset = np.random.randint(low=-crop_eleft, high=ts_l - crop_eright + 1, size=x.size(0))\n                \n                optimizer.zero_grad()\n                \n                out1 = self._net(take_per_row(x, crop_offset + crop_eleft, crop_right - crop_eleft))\n                out1 = out1[:, -crop_l:]\n                \n                out2 = self._net(take_per_row(x, crop_offset + crop_left, crop_eright - crop_left))\n                out2 = out2[:, :crop_l]\n                \n                loss = hierarchical_contrastive_loss(\n                    out1,\n                    out2,\n                    temporal_unit=self.temporal_unit\n                )\n                \n                loss.backward()\n                optimizer.step()\n                self.net.update_parameters(self._net)\n                    \n                cum_loss += loss.item()\n                n_epoch_iters += 1\n                \n                self.n_iters += 1\n                \n                if self.after_iter_callback is not None:\n                    self.after_iter_callback(self, loss.item())\n            \n            if interrupted:\n                break\n            \n            cum_loss /= n_epoch_iters\n            
loss_log.append(cum_loss)\n            if verbose:\n                print(f\"Epoch #{self.n_epochs}: loss={cum_loss}\")\n            self.n_epochs += 1\n            \n            if self.after_epoch_callback is not None:\n                self.after_epoch_callback(self, cum_loss)\n            \n        return loss_log\n    \n    def _eval_with_pooling(self, x, mask=None, slicing=None, encoding_window=None):\n        out = self.net(x.to(self.device, non_blocking=True), mask)\n        if encoding_window == 'full_series':\n            if slicing is not None:\n                out = out[:, slicing]\n            out = F.max_pool1d(\n                out.transpose(1, 2),\n                kernel_size = out.size(1),\n            ).transpose(1, 2)\n            \n        elif isinstance(encoding_window, int):\n            out = F.max_pool1d(\n                out.transpose(1, 2),\n                kernel_size = encoding_window,\n                stride = 1,\n                padding = encoding_window // 2\n            ).transpose(1, 2)\n            if encoding_window % 2 == 0:\n                out = out[:, :-1]\n            if slicing is not None:\n                out = out[:, slicing]\n            \n        elif encoding_window == 'multiscale':\n            p = 0\n            reprs = []\n            while (1 << p) + 1 < out.size(1):\n                t_out = F.max_pool1d(\n                    out.transpose(1, 2),\n                    kernel_size = (1 << (p + 1)) + 1,\n                    stride = 1,\n                    padding = 1 << p\n                ).transpose(1, 2)\n                if slicing is not None:\n                    t_out = t_out[:, slicing]\n                reprs.append(t_out)\n                p += 1\n            out = torch.cat(reprs, dim=-1)\n            \n        else:\n            if slicing is not None:\n                out = out[:, slicing]\n            \n        return out.cpu()\n    \n    def encode(self, data, mask=None, encoding_window=None, casual=False, 
sliding_length=None, sliding_padding=0, batch_size=None):\n        ''' Compute representations using the model.\n        \n        Args:\n            data (numpy.ndarray): This should have a shape of (n_instance, n_timestamps, n_features). All missing data should be set to NaN.\n            mask (str): The mask used by encoder can be specified with this parameter. This can be set to 'binomial', 'continuous', 'all_true', 'all_false' or 'mask_last'.\n            encoding_window (Union[str, int]): When this param is specified, the computed representation would be the max pooling over this window. This can be set to 'full_series', 'multiscale' or an integer specifying the pooling kernel size.\n            casual (bool): When this param is set to True, future information would not be encoded into the representation of each timestamp.\n            sliding_length (Union[int, NoneType]): The length of sliding window. When this param is specified, a sliding inference would be applied on the time series.\n            sliding_padding (int): This param specifies the contextual data length used for inference every sliding windows.\n            batch_size (Union[int, NoneType]): The batch size used for inference. 
If not specified, this would be the same batch size as training.\n            \n        Returns:\n            repr: The representations for data.\n        '''\n        assert self.net is not None, 'please train or load a net first'\n        assert data.ndim == 3\n        if batch_size is None:\n            batch_size = self.batch_size\n        n_samples, ts_l, _ = data.shape\n\n        org_training = self.net.training\n        self.net.eval()\n        \n        dataset = TensorDataset(torch.from_numpy(data).to(torch.float))\n        loader = DataLoader(dataset, batch_size=batch_size)\n        \n        with torch.no_grad():\n            output = []\n            for batch in loader:\n                x = batch[0]\n                if sliding_length is not None:\n                    reprs = []\n                    if n_samples < batch_size:\n                        calc_buffer = []\n                        calc_buffer_l = 0\n                    for i in range(0, ts_l, sliding_length):\n                        l = i - sliding_padding\n                        r = i + sliding_length + (sliding_padding if not casual else 0)\n                        x_sliding = torch_pad_nan(\n                            x[:, max(l, 0) : min(r, ts_l)],\n                            left=-l if l<0 else 0,\n                            right=r-ts_l if r>ts_l else 0,\n                            dim=1\n                        )\n                        if n_samples < batch_size:\n                            if calc_buffer_l + n_samples > batch_size:\n                                out = self._eval_with_pooling(\n                                    torch.cat(calc_buffer, dim=0),\n                                    mask,\n                                    slicing=slice(sliding_padding, sliding_padding+sliding_length),\n                                    encoding_window=encoding_window\n                                )\n                                reprs += torch.split(out, n_samples)\n    
                            calc_buffer = []\n                                calc_buffer_l = 0\n                            calc_buffer.append(x_sliding)\n                            calc_buffer_l += n_samples\n                        else:\n                            out = self._eval_with_pooling(\n                                x_sliding,\n                                mask,\n                                slicing=slice(sliding_padding, sliding_padding+sliding_length),\n                                encoding_window=encoding_window\n                            )\n                            reprs.append(out)\n\n                    if n_samples < batch_size:\n                        if calc_buffer_l > 0:\n                            out = self._eval_with_pooling(\n                                torch.cat(calc_buffer, dim=0),\n                                mask,\n                                slicing=slice(sliding_padding, sliding_padding+sliding_length),\n                                encoding_window=encoding_window\n                            )\n                            reprs += torch.split(out, n_samples)\n                            calc_buffer = []\n                            calc_buffer_l = 0\n                    \n                    out = torch.cat(reprs, dim=1)\n                    if encoding_window == 'full_series':\n                        out = F.max_pool1d(\n                            out.transpose(1, 2).contiguous(),\n                            kernel_size = out.size(1),\n                        ).squeeze(1)\n                else:\n                    out = self._eval_with_pooling(x, mask, encoding_window=encoding_window)\n                    if encoding_window == 'full_series':\n                        out = out.squeeze(1)\n                        \n                output.append(out)\n                \n            output = torch.cat(output, dim=0)\n            \n        self.net.train(org_training)\n        return 
output.numpy()\n    \n    def save(self, fn):\n        ''' Save the model to a file.\n        \n        Args:\n            fn (str): filename.\n        '''\n        torch.save(self.net.state_dict(), fn)\n    \n    def load(self, fn):\n        ''' Load the model from a file.\n        \n        Args:\n            fn (str): filename.\n        '''\n        state_dict = torch.load(fn, map_location=self.device)\n        self.net.load_state_dict(state_dict)\n    \n"
  },
  {
    "path": "ts_classification_methods/ts2vec_cls/utils.py",
    "content": "import os\nimport numpy as np\nimport pickle\nimport torch\nimport random\nfrom datetime import datetime\n\ndef pkl_save(name, var):\n    with open(name, 'wb') as f:\n        pickle.dump(var, f)\n\ndef pkl_load(name):\n    with open(name, 'rb') as f:\n        return pickle.load(f)\n    \ndef torch_pad_nan(arr, left=0, right=0, dim=0):\n    if left > 0:\n        padshape = list(arr.shape)\n        padshape[dim] = left\n        arr = torch.cat((torch.full(padshape, np.nan), arr), dim=dim)\n    if right > 0:\n        padshape = list(arr.shape)\n        padshape[dim] = right\n        arr = torch.cat((arr, torch.full(padshape, np.nan)), dim=dim)\n    return arr\n    \ndef pad_nan_to_target(array, target_length, axis=0, both_side=False):\n    assert array.dtype in [np.float16, np.float32, np.float64]\n    pad_size = target_length - array.shape[axis]\n    if pad_size <= 0:\n        return array\n    npad = [(0, 0)] * array.ndim\n    if both_side:\n        npad[axis] = (pad_size // 2, pad_size - pad_size//2)\n    else:\n        npad[axis] = (0, pad_size)\n    return np.pad(array, pad_width=npad, mode='constant', constant_values=np.nan)\n\ndef split_with_nan(x, sections, axis=0):\n    assert x.dtype in [np.float16, np.float32, np.float64]\n    arrs = np.array_split(x, sections, axis=axis)\n    target_length = arrs[0].shape[axis]\n    for i in range(len(arrs)):\n        arrs[i] = pad_nan_to_target(arrs[i], target_length, axis=axis)\n    return arrs\n\ndef take_per_row(A, indx, num_elem):\n    all_indx = indx[:,None] + np.arange(num_elem)\n    return A[torch.arange(all_indx.shape[0])[:,None], all_indx]\n\ndef centerize_vary_length_series(x):\n    prefix_zeros = np.argmax(~np.isnan(x).all(axis=-1), axis=1)\n    suffix_zeros = np.argmax(~np.isnan(x[:, ::-1]).all(axis=-1), axis=1)\n    offset = (prefix_zeros + suffix_zeros) // 2 - prefix_zeros\n    rows, column_indices = np.ogrid[:x.shape[0], :x.shape[1]]\n    offset[offset < 0] += x.shape[1]\n    column_indices 
= column_indices - offset[:, np.newaxis]\n    return x[rows, column_indices]\n\ndef data_dropout(arr, p):\n    B, T = arr.shape[0], arr.shape[1]\n    mask = np.full(B*T, False, dtype=bool)\n    ele_sel = np.random.choice(\n        B*T,\n        size=int(B*T*p),\n        replace=False\n    )\n    mask[ele_sel] = True\n    res = arr.copy()\n    res[mask.reshape(B, T)] = np.nan\n    return res\n\ndef name_with_datetime(prefix='default'):\n    now = datetime.now()\n    return prefix + '_' + now.strftime(\"%Y%m%d_%H%M%S\")\n\ndef init_dl_program(\n    device_name,\n    seed=None,\n    use_cudnn=True,\n    deterministic=False,\n    benchmark=False,\n    use_tf32=False,\n    max_threads=None\n):\n    import torch\n    if max_threads is not None:\n        torch.set_num_threads(max_threads)  # intraop\n        if torch.get_num_interop_threads() != max_threads:\n            torch.set_num_interop_threads(max_threads)  # interop\n        try:\n            import mkl\n        except:\n            pass\n        else:\n            mkl.set_num_threads(max_threads)\n        \n    if seed is not None:\n        random.seed(seed)\n        seed += 1\n        np.random.seed(seed)\n        seed += 1\n        torch.manual_seed(seed)\n        \n    if isinstance(device_name, (str, int)):\n        device_name = [device_name]\n    \n    devices = []\n    for t in reversed(device_name):\n        t_device = torch.device(t)\n        devices.append(t_device)\n        if t_device.type == 'cuda':\n            assert torch.cuda.is_available()\n            torch.cuda.set_device(t_device)\n            if seed is not None:\n                seed += 1\n                torch.cuda.manual_seed(seed)\n    devices.reverse()\n    torch.backends.cudnn.enabled = use_cudnn\n    torch.backends.cudnn.deterministic = deterministic\n    torch.backends.cudnn.benchmark = benchmark\n    \n    if hasattr(torch.backends.cudnn, 'allow_tf32'):\n        torch.backends.cudnn.allow_tf32 = use_tf32\n        
torch.backends.cuda.matmul.allow_tf32 = use_tf32\n        \n    return devices if len(devices) > 1 else devices[0]\n\n"
  },
  {
    "path": "ts_classification_methods/tsm_utils.py",
    "content": "import os\nimport random\n\nimport numpy as np\nimport pandas as pd\nimport torch\nimport torch.optim\n\nfrom data.preprocessing import load_data, k_fold, transfer_labels\nfrom model.loss import cross_entropy, reconstruction_loss\nfrom model.tsm_model import FCN, DilatedConvolution, Classifier, NonLinearClassifier, RNNDecoder, FCNDecoder\n\n\ndef set_seed(args):\n    random.seed(args.random_seed)\n    np.random.seed(args.random_seed)\n    torch.manual_seed(args.random_seed)\n    torch.cuda.manual_seed(args.random_seed)\n    torch.cuda.manual_seed_all(args.random_seed)\n\n\ndef build_model(args):\n    if args.backbone == 'fcn':\n        model = FCN(args.num_classes, args.input_size)\n    elif args.backbone == 'dilated':\n        model = DilatedConvolution(args.in_channels, args.embedding_channels,\n                                   args.out_channels, args.depth, args.reduced_size, args.kernel_size, args.num_classes)\n\n    if args.task == 'classification':\n        if args.classifier == 'nonlinear':\n            classifier = NonLinearClassifier(args.classifier_input, 128, args.num_classes)\n        elif args.classifier == 'linear':\n            classifier = Classifier(args.classifier_input, args.num_classes)\n\n    elif args.task == 'reconstruction':\n        if args.decoder_backbone == 'rnn':\n            classifier = RNNDecoder(input_dim=args.input_size)\n        if args.decoder_backbone == 'fcn':\n            classifier = FCNDecoder(num_classes=args.num_classes, seq_len=args.seq_len, input_size=args.input_size)\n\n    return model, classifier\n\n\ndef build_dataset(args):\n    sum_dataset, sum_target, num_classes = load_data(args.dataroot, args.dataset)\n\n    sum_target = transfer_labels(sum_target)\n    return sum_dataset, sum_target, num_classes\n\n\ndef build_loss(args):\n    if args.loss == 'cross_entropy':\n        return cross_entropy()\n    elif args.loss == 'reconstruction':\n        return reconstruction_loss()\n\n\ndef 
build_optimizer(args):\n    if args.optimizer == 'adam':\n        return torch.optim.Adam(lr=args.lr, weight_decay=args.weight_decay)\n    elif args.optimizer == 'sgd':\n        return torch.optim.SGD(lr=args.lr, weight_decay=args.weight_decay)\n\n\ndef evaluate(val_loader, model, classifier, loss, device):\n    val_loss = 0\n    val_accu = 0\n\n    sum_len = 0\n    for data, target in val_loader:\n        '''\n        data, target = data.to(device), target.to(device)\n        target = target.to(torch.int64)\n        '''\n        with torch.no_grad():\n            val_pred = model(data)\n            val_pred = classifier(val_pred)\n            val_loss += loss(val_pred, target).item()\n\n            val_accu += torch.sum(torch.argmax(val_pred.data, axis=1) == target)\n            sum_len += len(target)\n\n    return val_loss / sum_len, val_accu / sum_len\n\n\ndef save_finetune_result(args, accu, std):\n    save_path = os.path.join(args.save_dir, args.source_dataset, 'finetune_result.csv')\n    # save_path = os.path.join(args.save_dir, 'finetune_result.csv')\n    accu = accu.cpu().numpy()\n    std = std.cpu().numpy()\n    if os.path.exists(save_path):\n        result_form = pd.read_csv(save_path)\n    else:\n        result_form = pd.DataFrame(columns=['dataset', 'accuracy', 'std'])\n\n    result_form = result_form.append({'dataset': args.dataset, 'accuracy': '%.4f' % accu, 'std': '%.4f' % std},\n                                     ignore_index=True)\n    result_form = result_form.iloc[:, -3:]\n    result_form.to_csv(save_path)\n\n\ndef save_cls_result(args, test_accu, test_std, train_time, end_val_epoch, seeds=42):\n    save_path = os.path.join(args.save_dir, '', args.save_csv_name + 'cls_result.csv')\n    accu = test_accu.cpu().numpy()\n    std = test_std.cpu().numpy()\n    if os.path.exists(save_path):\n        result_form = pd.read_csv(save_path, index_col=0)\n    else:\n        result_form = pd.DataFrame(\n            columns=['dataset_name', 'test_accuracy', 
'test_std', 'train_time', 'end_val_epoch', 'seeds'])\n\n    result_form = result_form.append(\n        {'dataset_name': args.dataset, 'test_accuracy': '%.4f' % accu, 'test_std': '%.4f' % std,\n         'train_time': '%.4f' % train_time, 'end_val_epoch': '%.2f' % end_val_epoch,\n         'seeds': '%d' % seeds}, ignore_index=True)\n\n    result_form.to_csv(save_path, index=True, index_label=\"id\")\n\n\ndef get_all_datasets(data, target):\n    return k_fold(data, target)\n"
  },
  {
    "path": "ts_classification_methods/tst_cls/scripts/classification.sh",
    "content": "python src/main.py --dataset AllGestureWiimoteY --data_dir /dev_data/zzj/hzy/datasets/UCR/AllGestureWiimoteY --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset UWaveGestureLibraryX --data_dir /dev_data/zzj/hzy/datasets/UCR/UWaveGestureLibraryX --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset DiatomSizeReduction --data_dir /dev_data/zzj/hzy/datasets/UCR/DiatomSizeReduction --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset FreezerSmallTrain --data_dir /dev_data/zzj/hzy/datasets/UCR/FreezerSmallTrain --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset ScreenType --data_dir /dev_data/zzj/hzy/datasets/UCR/ScreenType --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset MixedShapesSmallTrain --data_dir /dev_data/zzj/hzy/datasets/UCR/MixedShapesSmallTrain --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset SonyAIBORobotSurface2 --data_dir /dev_data/zzj/hzy/datasets/UCR/SonyAIBORobotSurface2 --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset LargeKitchenAppliances --data_dir /dev_data/zzj/hzy/datasets/UCR/LargeKitchenAppliances --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset ProximalPhalanxOutlineCorrect --data_dir /dev_data/zzj/hzy/datasets/UCR/ProximalPhalanxOutlineCorrect --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset OSULeaf --data_dir /dev_data/zzj/hzy/datasets/UCR/OSULeaf --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset OliveOil --data_dir /dev_data/zzj/hzy/datasets/UCR/OliveOil --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset FreezerRegularTrain --data_dir 
/dev_data/zzj/hzy/datasets/UCR/FreezerRegularTrain --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset Herring --data_dir /dev_data/zzj/hzy/datasets/UCR/Herring --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset GesturePebbleZ1 --data_dir /dev_data/zzj/hzy/datasets/UCR/GesturePebbleZ1 --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset MelbournePedestrian --data_dir /dev_data/zzj/hzy/datasets/UCR/MelbournePedestrian --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset PhalangesOutlinesCorrect --data_dir /dev_data/zzj/hzy/datasets/UCR/PhalangesOutlinesCorrect --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset CricketZ --data_dir /dev_data/zzj/hzy/datasets/UCR/CricketZ --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset ACSF1 --data_dir /dev_data/zzj/hzy/datasets/UCR/ACSF1 --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset FaceFour --data_dir /dev_data/zzj/hzy/datasets/UCR/FaceFour --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset SemgHandGenderCh2 --data_dir /dev_data/zzj/hzy/datasets/UCR/SemgHandGenderCh2 --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset Haptics --data_dir /dev_data/zzj/hzy/datasets/UCR/Haptics --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset UWaveGestureLibraryY --data_dir /dev_data/zzj/hzy/datasets/UCR/UWaveGestureLibraryY --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset Coffee --data_dir /dev_data/zzj/hzy/datasets/UCR/Coffee --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset TwoLeadECG --data_dir /dev_data/zzj/hzy/datasets/UCR/TwoLeadECG --batch_size 128 --task 
classification  --gpu 0 --epochs 1000\npython src/main.py --dataset DistalPhalanxOutlineAgeGroup --data_dir /dev_data/zzj/hzy/datasets/UCR/DistalPhalanxOutlineAgeGroup --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset MixedShapesRegularTrain --data_dir /dev_data/zzj/hzy/datasets/UCR/MixedShapesRegularTrain --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset SmoothSubspace --data_dir /dev_data/zzj/hzy/datasets/UCR/SmoothSubspace --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset Meat --data_dir /dev_data/zzj/hzy/datasets/UCR/Meat --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset ShapesAll --data_dir /dev_data/zzj/hzy/datasets/UCR/ShapesAll --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset InsectEPGSmallTrain --data_dir /dev_data/zzj/hzy/datasets/UCR/InsectEPGSmallTrain --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset CinCECGTorso --data_dir /dev_data/zzj/hzy/datasets/UCR/CinCECGTorso --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset BeetleFly --data_dir /dev_data/zzj/hzy/datasets/UCR/BeetleFly --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset Ham --data_dir /dev_data/zzj/hzy/datasets/UCR/Ham --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset ProximalPhalanxTW --data_dir /dev_data/zzj/hzy/datasets/UCR/ProximalPhalanxTW --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset ItalyPowerDemand --data_dir /dev_data/zzj/hzy/datasets/UCR/ItalyPowerDemand --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset GunPointMaleVersusFemale --data_dir /dev_data/zzj/hzy/datasets/UCR/GunPointMaleVersusFemale --batch_size 128 --task 
classification  --gpu 0 --epochs 1000\npython src/main.py --dataset SonyAIBORobotSurface1 --data_dir /dev_data/zzj/hzy/datasets/UCR/SonyAIBORobotSurface1 --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset MedicalImages --data_dir /dev_data/zzj/hzy/datasets/UCR/MedicalImages --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset SmallKitchenAppliances --data_dir /dev_data/zzj/hzy/datasets/UCR/SmallKitchenAppliances --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset PigCVP --data_dir /dev_data/zzj/hzy/datasets/UCR/PigCVP --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset Crop --data_dir /dev_data/zzj/hzy/datasets/UCR/Crop --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset Chinatown --data_dir /dev_data/zzj/hzy/datasets/UCR/Chinatown --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset PLAID --data_dir /dev_data/zzj/hzy/datasets/UCR/PLAID --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset RefrigerationDevices --data_dir /dev_data/zzj/hzy/datasets/UCR/RefrigerationDevices --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset Wine --data_dir /dev_data/zzj/hzy/datasets/UCR/Wine --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset Yoga --data_dir /dev_data/zzj/hzy/datasets/UCR/Yoga --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset AllGestureWiimoteX --data_dir /dev_data/zzj/hzy/datasets/UCR/AllGestureWiimoteX --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset DistalPhalanxTW --data_dir /dev_data/zzj/hzy/datasets/UCR/DistalPhalanxTW --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset Computers 
--data_dir /dev_data/zzj/hzy/datasets/UCR/Computers --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset ElectricDevices --data_dir /dev_data/zzj/hzy/datasets/UCR/ElectricDevices --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset Adiac --data_dir /dev_data/zzj/hzy/datasets/UCR/Adiac --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset InlineSkate --data_dir /dev_data/zzj/hzy/datasets/UCR/InlineSkate --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset FacesUCR --data_dir /dev_data/zzj/hzy/datasets/UCR/FacesUCR --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset ShapeletSim --data_dir /dev_data/zzj/hzy/datasets/UCR/ShapeletSim --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset GunPointAgeSpan --data_dir /dev_data/zzj/hzy/datasets/UCR/GunPointAgeSpan --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset Phoneme --data_dir /dev_data/zzj/hzy/datasets/UCR/Phoneme --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset CricketX --data_dir /dev_data/zzj/hzy/datasets/UCR/CricketX --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset Lightning2 --data_dir /dev_data/zzj/hzy/datasets/UCR/Lightning2 --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset Beef --data_dir /dev_data/zzj/hzy/datasets/UCR/Beef --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset PowerCons --data_dir /dev_data/zzj/hzy/datasets/UCR/PowerCons --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset Plane --data_dir /dev_data/zzj/hzy/datasets/UCR/Plane --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset 
NonInvasiveFetalECGThorax2 --data_dir /dev_data/zzj/hzy/datasets/UCR/NonInvasiveFetalECGThorax2 --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset UMD --data_dir /dev_data/zzj/hzy/datasets/UCR/UMD --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset Wafer --data_dir /dev_data/zzj/hzy/datasets/UCR/Wafer --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset ToeSegmentation1 --data_dir /dev_data/zzj/hzy/datasets/UCR/ToeSegmentation1 --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset Car --data_dir /dev_data/zzj/hzy/datasets/UCR/Car --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset UWaveGestureLibraryZ --data_dir /dev_data/zzj/hzy/datasets/UCR/UWaveGestureLibraryZ --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset EOGVerticalSignal --data_dir /dev_data/zzj/hzy/datasets/UCR/EOGVerticalSignal --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset CBF --data_dir /dev_data/zzj/hzy/datasets/UCR/CBF --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset EOGHorizontalSignal --data_dir /dev_data/zzj/hzy/datasets/UCR/EOGHorizontalSignal --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset Strawberry --data_dir /dev_data/zzj/hzy/datasets/UCR/Strawberry --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset StarLightCurves --data_dir /dev_data/zzj/hzy/datasets/UCR/StarLightCurves --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset DodgerLoopGame --data_dir /dev_data/zzj/hzy/datasets/UCR/DodgerLoopGame --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset FordA --data_dir /dev_data/zzj/hzy/datasets/UCR/FordA 
--batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset Fish --data_dir /dev_data/zzj/hzy/datasets/UCR/Fish --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset PigArtPressure --data_dir /dev_data/zzj/hzy/datasets/UCR/PigArtPressure --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset ShakeGestureWiimoteZ --data_dir /dev_data/zzj/hzy/datasets/UCR/ShakeGestureWiimoteZ --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset ECGFiveDays --data_dir /dev_data/zzj/hzy/datasets/UCR/ECGFiveDays --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset GunPointOldVersusYoung --data_dir /dev_data/zzj/hzy/datasets/UCR/GunPointOldVersusYoung --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset GesturePebbleZ2 --data_dir /dev_data/zzj/hzy/datasets/UCR/GesturePebbleZ2 --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset ECG200 --data_dir /dev_data/zzj/hzy/datasets/UCR/ECG200 --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset Symbols --data_dir /dev_data/zzj/hzy/datasets/UCR/Symbols --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset FordB --data_dir /dev_data/zzj/hzy/datasets/UCR/FordB --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset FaceAll --data_dir /dev_data/zzj/hzy/datasets/UCR/FaceAll --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset MiddlePhalanxTW --data_dir /dev_data/zzj/hzy/datasets/UCR/MiddlePhalanxTW --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset MiddlePhalanxOutlineCorrect --data_dir /dev_data/zzj/hzy/datasets/UCR/MiddlePhalanxOutlineCorrect --batch_size 128 --task classification  --gpu 0 --epochs 
1000\npython src/main.py --dataset GestureMidAirD1 --data_dir /dev_data/zzj/hzy/datasets/UCR/GestureMidAirD1 --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset InsectEPGRegularTrain --data_dir /dev_data/zzj/hzy/datasets/UCR/InsectEPGRegularTrain --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset DodgerLoopDay --data_dir /dev_data/zzj/hzy/datasets/UCR/DodgerLoopDay --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset ProximalPhalanxOutlineAgeGroup --data_dir /dev_data/zzj/hzy/datasets/UCR/ProximalPhalanxOutlineAgeGroup --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset HandOutlines --data_dir /dev_data/zzj/hzy/datasets/UCR/HandOutlines --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset SwedishLeaf --data_dir /dev_data/zzj/hzy/datasets/UCR/SwedishLeaf --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset AllGestureWiimoteZ --data_dir /dev_data/zzj/hzy/datasets/UCR/AllGestureWiimoteZ --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset InsectWingbeatSound --data_dir /dev_data/zzj/hzy/datasets/UCR/InsectWingbeatSound --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset MiddlePhalanxOutlineAgeGroup --data_dir /dev_data/zzj/hzy/datasets/UCR/MiddlePhalanxOutlineAgeGroup --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset GestureMidAirD3 --data_dir /dev_data/zzj/hzy/datasets/UCR/GestureMidAirD3 --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset ChlorineConcentration --data_dir /dev_data/zzj/hzy/datasets/UCR/ChlorineConcentration --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset ArrowHead --data_dir /dev_data/zzj/hzy/datasets/UCR/ArrowHead 
--batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset Fungi --data_dir /dev_data/zzj/hzy/datasets/UCR/Fungi --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset PigAirwayPressure --data_dir /dev_data/zzj/hzy/datasets/UCR/PigAirwayPressure --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset PickupGestureWiimoteZ --data_dir /dev_data/zzj/hzy/datasets/UCR/PickupGestureWiimoteZ --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset Rock --data_dir /dev_data/zzj/hzy/datasets/UCR/Rock --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset Worms --data_dir /dev_data/zzj/hzy/datasets/UCR/Worms --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset Lightning7 --data_dir /dev_data/zzj/hzy/datasets/UCR/Lightning7 --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset BME --data_dir /dev_data/zzj/hzy/datasets/UCR/BME --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset SyntheticControl --data_dir /dev_data/zzj/hzy/datasets/UCR/SyntheticControl --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset MoteStrain --data_dir /dev_data/zzj/hzy/datasets/UCR/MoteStrain --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset SemgHandMovementCh2 --data_dir /dev_data/zzj/hzy/datasets/UCR/SemgHandMovementCh2 --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset Mallat --data_dir /dev_data/zzj/hzy/datasets/UCR/Mallat --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset GestureMidAirD2 --data_dir /dev_data/zzj/hzy/datasets/UCR/GestureMidAirD2 --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset CricketY 
--data_dir /dev_data/zzj/hzy/datasets/UCR/CricketY --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset NonInvasiveFetalECGThorax1 --data_dir /dev_data/zzj/hzy/datasets/UCR/NonInvasiveFetalECGThorax1 --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset ToeSegmentation2 --data_dir /dev_data/zzj/hzy/datasets/UCR/ToeSegmentation2 --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset ECG5000 --data_dir /dev_data/zzj/hzy/datasets/UCR/ECG5000 --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset Trace --data_dir /dev_data/zzj/hzy/datasets/UCR/Trace --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset WormsTwoClass --data_dir /dev_data/zzj/hzy/datasets/UCR/WormsTwoClass --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset GunPoint --data_dir /dev_data/zzj/hzy/datasets/UCR/GunPoint --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset UWaveGestureLibraryAll --data_dir /dev_data/zzj/hzy/datasets/UCR/UWaveGestureLibraryAll --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset EthanolLevel --data_dir /dev_data/zzj/hzy/datasets/UCR/EthanolLevel --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset WordSynonyms --data_dir /dev_data/zzj/hzy/datasets/UCR/WordSynonyms --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset HouseTwenty --data_dir /dev_data/zzj/hzy/datasets/UCR/HouseTwenty --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset DodgerLoopWeekend --data_dir /dev_data/zzj/hzy/datasets/UCR/DodgerLoopWeekend --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset Earthquakes --data_dir 
/dev_data/zzj/hzy/datasets/UCR/Earthquakes --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset TwoPatterns --data_dir /dev_data/zzj/hzy/datasets/UCR/TwoPatterns --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset DistalPhalanxOutlineCorrect --data_dir /dev_data/zzj/hzy/datasets/UCR/DistalPhalanxOutlineCorrect --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset SemgHandSubjectCh2 --data_dir /dev_data/zzj/hzy/datasets/UCR/SemgHandSubjectCh2 --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset FiftyWords --data_dir /dev_data/zzj/hzy/datasets/UCR/FiftyWords --batch_size 128 --task classification  --gpu 0 --epochs 1000\npython src/main.py --dataset BirdChicken --data_dir /dev_data/zzj/hzy/datasets/UCR/BirdChicken --batch_size 128 --task classification  --gpu 0 --epochs 1000\n"
  },
  {
    "path": "ts_classification_methods/tst_cls/scripts/pretrain_finetune.sh",
    "content": "python src/main.py --dataset AllGestureWiimoteY --data_dir /dev_data/zzj/hzy/datasets/UCR/AllGestureWiimoteY --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset UWaveGestureLibraryX --data_dir /dev_data/zzj/hzy/datasets/UCR/UWaveGestureLibraryX --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset DiatomSizeReduction --data_dir /dev_data/zzj/hzy/datasets/UCR/DiatomSizeReduction --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset FreezerSmallTrain --data_dir /dev_data/zzj/hzy/datasets/UCR/FreezerSmallTrain --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset ScreenType --data_dir /dev_data/zzj/hzy/datasets/UCR/ScreenType --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset MixedShapesSmallTrain --data_dir /dev_data/zzj/hzy/datasets/UCR/MixedShapesSmallTrain --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset SonyAIBORobotSurface2 --data_dir /dev_data/zzj/hzy/datasets/UCR/SonyAIBORobotSurface2 --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset LargeKitchenAppliances --data_dir /dev_data/zzj/hzy/datasets/UCR/LargeKitchenAppliances --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset ProximalPhalanxOutlineCorrect --data_dir /dev_data/zzj/hzy/datasets/UCR/ProximalPhalanxOutlineCorrect --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset OSULeaf --data_dir /dev_data/zzj/hzy/datasets/UCR/OSULeaf --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset OliveOil --data_dir /dev_data/zzj/hzy/datasets/UCR/OliveOil --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset 
FreezerRegularTrain --data_dir /dev_data/zzj/hzy/datasets/UCR/FreezerRegularTrain --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset Herring --data_dir /dev_data/zzj/hzy/datasets/UCR/Herring --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset GesturePebbleZ1 --data_dir /dev_data/zzj/hzy/datasets/UCR/GesturePebbleZ1 --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset MelbournePedestrian --data_dir /dev_data/zzj/hzy/datasets/UCR/MelbournePedestrian --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset PhalangesOutlinesCorrect --data_dir /dev_data/zzj/hzy/datasets/UCR/PhalangesOutlinesCorrect --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset CricketZ --data_dir /dev_data/zzj/hzy/datasets/UCR/CricketZ --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset ACSF1 --data_dir /dev_data/zzj/hzy/datasets/UCR/ACSF1 --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset FaceFour --data_dir /dev_data/zzj/hzy/datasets/UCR/FaceFour --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset SemgHandGenderCh2 --data_dir /dev_data/zzj/hzy/datasets/UCR/SemgHandGenderCh2 --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset Haptics --data_dir /dev_data/zzj/hzy/datasets/UCR/Haptics --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset UWaveGestureLibraryY --data_dir /dev_data/zzj/hzy/datasets/UCR/UWaveGestureLibraryY --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset Coffee --data_dir /dev_data/zzj/hzy/datasets/UCR/Coffee --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython 
src/main.py --dataset TwoLeadECG --data_dir /dev_data/zzj/hzy/datasets/UCR/TwoLeadECG --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset DistalPhalanxOutlineAgeGroup --data_dir /dev_data/zzj/hzy/datasets/UCR/DistalPhalanxOutlineAgeGroup --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset MixedShapesRegularTrain --data_dir /dev_data/zzj/hzy/datasets/UCR/MixedShapesRegularTrain --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset SmoothSubspace --data_dir /dev_data/zzj/hzy/datasets/UCR/SmoothSubspace --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset Meat --data_dir /dev_data/zzj/hzy/datasets/UCR/Meat --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset ShapesAll --data_dir /dev_data/zzj/hzy/datasets/UCR/ShapesAll --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset InsectEPGSmallTrain --data_dir /dev_data/zzj/hzy/datasets/UCR/InsectEPGSmallTrain --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset CinCECGTorso --data_dir /dev_data/zzj/hzy/datasets/UCR/CinCECGTorso --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset BeetleFly --data_dir /dev_data/zzj/hzy/datasets/UCR/BeetleFly --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset Ham --data_dir /dev_data/zzj/hzy/datasets/UCR/Ham --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset ProximalPhalanxTW --data_dir /dev_data/zzj/hzy/datasets/UCR/ProximalPhalanxTW --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset ItalyPowerDemand --data_dir /dev_data/zzj/hzy/datasets/UCR/ItalyPowerDemand --batch_size 128 --task 
pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset GunPointMaleVersusFemale --data_dir /dev_data/zzj/hzy/datasets/UCR/GunPointMaleVersusFemale --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset SonyAIBORobotSurface1 --data_dir /dev_data/zzj/hzy/datasets/UCR/SonyAIBORobotSurface1 --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset MedicalImages --data_dir /dev_data/zzj/hzy/datasets/UCR/MedicalImages --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset SmallKitchenAppliances --data_dir /dev_data/zzj/hzy/datasets/UCR/SmallKitchenAppliances --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset PigCVP --data_dir /dev_data/zzj/hzy/datasets/UCR/PigCVP --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset Crop --data_dir /dev_data/zzj/hzy/datasets/UCR/Crop --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset Chinatown --data_dir /dev_data/zzj/hzy/datasets/UCR/Chinatown --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset PLAID --data_dir /dev_data/zzj/hzy/datasets/UCR/PLAID --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset RefrigerationDevices --data_dir /dev_data/zzj/hzy/datasets/UCR/RefrigerationDevices --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset Wine --data_dir /dev_data/zzj/hzy/datasets/UCR/Wine --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset Yoga --data_dir /dev_data/zzj/hzy/datasets/UCR/Yoga --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset AllGestureWiimoteX --data_dir /dev_data/zzj/hzy/datasets/UCR/AllGestureWiimoteX --batch_size 128 
--task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset DistalPhalanxTW --data_dir /dev_data/zzj/hzy/datasets/UCR/DistalPhalanxTW --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset Computers --data_dir /dev_data/zzj/hzy/datasets/UCR/Computers --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset ElectricDevices --data_dir /dev_data/zzj/hzy/datasets/UCR/ElectricDevices --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset Adiac --data_dir /dev_data/zzj/hzy/datasets/UCR/Adiac --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset InlineSkate --data_dir /dev_data/zzj/hzy/datasets/UCR/InlineSkate --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset FacesUCR --data_dir /dev_data/zzj/hzy/datasets/UCR/FacesUCR --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset ShapeletSim --data_dir /dev_data/zzj/hzy/datasets/UCR/ShapeletSim --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset GunPointAgeSpan --data_dir /dev_data/zzj/hzy/datasets/UCR/GunPointAgeSpan --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset Phoneme --data_dir /dev_data/zzj/hzy/datasets/UCR/Phoneme --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset CricketX --data_dir /dev_data/zzj/hzy/datasets/UCR/CricketX --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset Lightning2 --data_dir /dev_data/zzj/hzy/datasets/UCR/Lightning2 --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset Beef --data_dir /dev_data/zzj/hzy/datasets/UCR/Beef --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython 
src/main.py --dataset PowerCons --data_dir /dev_data/zzj/hzy/datasets/UCR/PowerCons --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset Plane --data_dir /dev_data/zzj/hzy/datasets/UCR/Plane --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset NonInvasiveFetalECGThorax2 --data_dir /dev_data/zzj/hzy/datasets/UCR/NonInvasiveFetalECGThorax2 --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset UMD --data_dir /dev_data/zzj/hzy/datasets/UCR/UMD --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset Wafer --data_dir /dev_data/zzj/hzy/datasets/UCR/Wafer --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset ToeSegmentation1 --data_dir /dev_data/zzj/hzy/datasets/UCR/ToeSegmentation1 --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset Car --data_dir /dev_data/zzj/hzy/datasets/UCR/Car --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset UWaveGestureLibraryZ --data_dir /dev_data/zzj/hzy/datasets/UCR/UWaveGestureLibraryZ --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset EOGVerticalSignal --data_dir /dev_data/zzj/hzy/datasets/UCR/EOGVerticalSignal --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset CBF --data_dir /dev_data/zzj/hzy/datasets/UCR/CBF --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset EOGHorizontalSignal --data_dir /dev_data/zzj/hzy/datasets/UCR/EOGHorizontalSignal --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset Strawberry --data_dir /dev_data/zzj/hzy/datasets/UCR/Strawberry --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset 
StarLightCurves --data_dir /dev_data/zzj/hzy/datasets/UCR/StarLightCurves --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset DodgerLoopGame --data_dir /dev_data/zzj/hzy/datasets/UCR/DodgerLoopGame --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset FordA --data_dir /dev_data/zzj/hzy/datasets/UCR/FordA --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset Fish --data_dir /dev_data/zzj/hzy/datasets/UCR/Fish --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset PigArtPressure --data_dir /dev_data/zzj/hzy/datasets/UCR/PigArtPressure --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset ShakeGestureWiimoteZ --data_dir /dev_data/zzj/hzy/datasets/UCR/ShakeGestureWiimoteZ --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset ECGFiveDays --data_dir /dev_data/zzj/hzy/datasets/UCR/ECGFiveDays --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset GunPointOldVersusYoung --data_dir /dev_data/zzj/hzy/datasets/UCR/GunPointOldVersusYoung --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset GesturePebbleZ2 --data_dir /dev_data/zzj/hzy/datasets/UCR/GesturePebbleZ2 --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset ECG200 --data_dir /dev_data/zzj/hzy/datasets/UCR/ECG200 --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset Symbols --data_dir /dev_data/zzj/hzy/datasets/UCR/Symbols --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset FordB --data_dir /dev_data/zzj/hzy/datasets/UCR/FordB --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset FaceAll --data_dir 
/dev_data/zzj/hzy/datasets/UCR/FaceAll --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset MiddlePhalanxTW --data_dir /dev_data/zzj/hzy/datasets/UCR/MiddlePhalanxTW --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset MiddlePhalanxOutlineCorrect --data_dir /dev_data/zzj/hzy/datasets/UCR/MiddlePhalanxOutlineCorrect --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset GestureMidAirD1 --data_dir /dev_data/zzj/hzy/datasets/UCR/GestureMidAirD1 --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset InsectEPGRegularTrain --data_dir /dev_data/zzj/hzy/datasets/UCR/InsectEPGRegularTrain --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset DodgerLoopDay --data_dir /dev_data/zzj/hzy/datasets/UCR/DodgerLoopDay --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset ProximalPhalanxOutlineAgeGroup --data_dir /dev_data/zzj/hzy/datasets/UCR/ProximalPhalanxOutlineAgeGroup --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset HandOutlines --data_dir /dev_data/zzj/hzy/datasets/UCR/HandOutlines --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset SwedishLeaf --data_dir /dev_data/zzj/hzy/datasets/UCR/SwedishLeaf --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset AllGestureWiimoteZ --data_dir /dev_data/zzj/hzy/datasets/UCR/AllGestureWiimoteZ --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset InsectWingbeatSound --data_dir /dev_data/zzj/hzy/datasets/UCR/InsectWingbeatSound --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset MiddlePhalanxOutlineAgeGroup --data_dir 
/dev_data/zzj/hzy/datasets/UCR/MiddlePhalanxOutlineAgeGroup --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset GestureMidAirD3 --data_dir /dev_data/zzj/hzy/datasets/UCR/GestureMidAirD3 --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset ChlorineConcentration --data_dir /dev_data/zzj/hzy/datasets/UCR/ChlorineConcentration --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset ArrowHead --data_dir /dev_data/zzj/hzy/datasets/UCR/ArrowHead --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset Fungi --data_dir /dev_data/zzj/hzy/datasets/UCR/Fungi --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset PigAirwayPressure --data_dir /dev_data/zzj/hzy/datasets/UCR/PigAirwayPressure --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset PickupGestureWiimoteZ --data_dir /dev_data/zzj/hzy/datasets/UCR/PickupGestureWiimoteZ --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset Rock --data_dir /dev_data/zzj/hzy/datasets/UCR/Rock --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset Worms --data_dir /dev_data/zzj/hzy/datasets/UCR/Worms --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset Lightning7 --data_dir /dev_data/zzj/hzy/datasets/UCR/Lightning7 --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset BME --data_dir /dev_data/zzj/hzy/datasets/UCR/BME --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset SyntheticControl --data_dir /dev_data/zzj/hzy/datasets/UCR/SyntheticControl --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset MoteStrain --data_dir 
/dev_data/zzj/hzy/datasets/UCR/MoteStrain --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset SemgHandMovementCh2 --data_dir /dev_data/zzj/hzy/datasets/UCR/SemgHandMovementCh2 --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset Mallat --data_dir /dev_data/zzj/hzy/datasets/UCR/Mallat --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset GestureMidAirD2 --data_dir /dev_data/zzj/hzy/datasets/UCR/GestureMidAirD2 --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset CricketY --data_dir /dev_data/zzj/hzy/datasets/UCR/CricketY --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset NonInvasiveFetalECGThorax1 --data_dir /dev_data/zzj/hzy/datasets/UCR/NonInvasiveFetalECGThorax1 --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset ToeSegmentation2 --data_dir /dev_data/zzj/hzy/datasets/UCR/ToeSegmentation2 --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset ECG5000 --data_dir /dev_data/zzj/hzy/datasets/UCR/ECG5000 --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset Trace --data_dir /dev_data/zzj/hzy/datasets/UCR/Trace --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset WormsTwoClass --data_dir /dev_data/zzj/hzy/datasets/UCR/WormsTwoClass --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset GunPoint --data_dir /dev_data/zzj/hzy/datasets/UCR/GunPoint --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset UWaveGestureLibraryAll --data_dir /dev_data/zzj/hzy/datasets/UCR/UWaveGestureLibraryAll --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset 
EthanolLevel --data_dir /dev_data/zzj/hzy/datasets/UCR/EthanolLevel --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset WordSynonyms --data_dir /dev_data/zzj/hzy/datasets/UCR/WordSynonyms --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset HouseTwenty --data_dir /dev_data/zzj/hzy/datasets/UCR/HouseTwenty --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset DodgerLoopWeekend --data_dir /dev_data/zzj/hzy/datasets/UCR/DodgerLoopWeekend --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset Earthquakes --data_dir /dev_data/zzj/hzy/datasets/UCR/Earthquakes --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset TwoPatterns --data_dir /dev_data/zzj/hzy/datasets/UCR/TwoPatterns --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset DistalPhalanxOutlineCorrect --data_dir /dev_data/zzj/hzy/datasets/UCR/DistalPhalanxOutlineCorrect --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset SemgHandSubjectCh2 --data_dir /dev_data/zzj/hzy/datasets/UCR/SemgHandSubjectCh2 --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset FiftyWords --data_dir /dev_data/zzj/hzy/datasets/UCR/FiftyWords --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\npython src/main.py --dataset BirdChicken --data_dir /dev_data/zzj/hzy/datasets/UCR/BirdChicken --batch_size 128 --task pretrain_and_finetune  --gpu 0 --epochs 400\n"
  },
  {
    "path": "ts_classification_methods/tst_cls/src/__init__.py",
    "content": ""
  },
  {
    "path": "ts_classification_methods/tst_cls/src/dataprepare.py",
    "content": "import enum\nimport pandas as pd\nfrom sklearn import model_selection\nimport sklearn\nfrom sklearn.metrics import normalized_mutual_info_score\nfrom sklearn.model_selection import StratifiedShuffleSplit, StratifiedKFold\nfrom sklearn import preprocessing\nimport numpy as np\nimport os\nimport argparse\nimport shutil\n\n\n\n\ndef load_data(dataroot, dataset):\n    train = pd.read_csv(os.path.join(dataroot, dataset,\n                        dataset+'_TRAIN.tsv'), sep='\\t', header=None)\n    train_x = train.iloc[:, 1:]\n    train_target = train.iloc[:, 0]\n\n    test = pd.read_csv(os.path.join(dataroot, dataset,\n                       dataset+'_TEST.tsv'), sep='\\t', header=None)\n    test_x = test.iloc[:, 1:]\n    test_target = test.iloc[:, 0]\n\n    sum_dataset = pd.concat([train_x, test_x]).to_numpy(np.float32)\n    #sum_dataset = sum_dataset.fillna(sum_dataset.mean()).to_np(dtype=np.float32)\n    sum_target = pd.concat([train_target, test_target]).to_numpy(np.float32)\n    # sum_target = sum_target.fillna(sum_target.mean()).to_np(dtype=np.float32)\n\n    num_classes = len(np.unique(sum_target))\n\n    #sum_target = transfer_labels(sum_target)\n    return sum_dataset, sum_target\n\n\ndef transfer_labels(labels):\n    indicies = np.unique(labels)\n    num_samples = labels.shape[0]\n\n    for i in range(num_samples):\n        new_label = np.argwhere(labels[i] == indicies)[0][0]\n        labels[i] = new_label\n\n    return labels\n\n\ndef k_fold(data, target):\n    skf = StratifiedKFold(5, shuffle=True)\n    #skf = StratifiedShuffleSplit(5)\n    train_sets = []\n    train_targets = []\n\n    val_sets = []\n    val_targets = []\n\n    test_sets = []\n    test_targets = []\n\n    for raw_index, test_index in skf.split(data, target):\n        raw_set = data[raw_index]\n        raw_target = target[raw_index]\n\n        test_sets.append(data[test_index])\n        test_targets.append(target[test_index])\n\n        train_index, val_index = 
next(StratifiedKFold(\n            4, shuffle=True).split(raw_set, raw_target))\n        # train_index, val_index = next(StratifiedShuffleSplit(1).split(raw_set, raw_target))\n        train_sets.append(raw_set[train_index])\n        train_targets.append(raw_target[train_index])\n\n        val_sets.append(raw_set[val_index])\n        val_targets.append(raw_target[val_index])\n\n    return np.array(train_sets), np.array(train_targets), np.array(val_sets), np.array(val_targets), np.array(test_sets), np.array(test_targets)\n\n\ndef normalize_per_series(data):\n    std_ = np.std(data, axis=1, keepdims=True)\n    std_[std_ == 0] = 1.0\n    return (data - np.mean(data, axis=1, keepdims=True)) / std_\n\n\ndef fill_nan_value(train_set, val_set, test_set):\n\n    ind = np.where(np.isnan(train_set))\n    col_mean = np.nanmean(train_set, axis=0)\n    col_mean[np.isnan(col_mean)] = 1e-6\n\n    train_set[ind] = np.take(col_mean, ind[1])\n\n    ind_val = np.where(np.isnan(val_set))\n    val_set[ind_val] = np.take(col_mean, ind_val[1])\n\n    ind_test = np.where(np.isnan(test_set))\n    test_set[ind_test] = np.take(col_mean, ind_test[1])\n    return train_set, val_set, test_set\n\n# input: dataframe after .loc[indices]\n# output: a dataframe, input of the dataset_class\n\n\ndef fill_nan_and_normalize(train_data, val_data, test_data, train_indices, val_indices, test_indices):\n    train_arr = np.array(train_data)\n    train_arr = np.reshape(train_arr, [len(train_indices), -1])\n\n    val_arr = np.array(val_data)\n    val_arr = np.reshape(val_arr, [len(val_indices), -1])\n\n    test_arr = np.array(test_data)\n    test_arr = np.reshape(test_arr, [len(test_indices), -1])\n\n    train_arr, val_arr, test_arr = fill_nan_value(train_arr, val_arr, test_arr)\n\n    train_arr = normalize_per_series(train_arr)\n    val_arr = normalize_per_series(val_arr)\n    test_arr = normalize_per_series(test_arr)\n\n    train_raw = pd.DataFrame(train_arr)\n    train_df = pd.DataFrame()\n    
train_df['dim_0'] = [pd.Series(train_raw.iloc[x, :])\n                         for x in range(len(train_raw))]\n    lengths = train_df.applymap(lambda x: len(x)).values\n    train_df = pd.concat((pd.DataFrame({col: train_df.loc[row, col] for col in train_df.columns}).reset_index(drop=True).set_index(\n        pd.Series(lengths[row, 0]*[row])) for row in range(train_df.shape[0])), axis=0)\n    train_df = train_df.groupby(train_df.index).transform(lambda x: x)\n\n    val_raw = pd.DataFrame(val_arr)\n    val_df = pd.DataFrame()\n    val_df['dim_0'] = [pd.Series(val_raw.iloc[x, :])\n                       for x in range(len(val_raw))]\n    lengths = val_df.applymap(lambda x: len(x)).values\n    val_df = pd.concat((pd.DataFrame({col: val_df.loc[row, col] for col in val_df.columns}).reset_index(drop=True).set_index(\n        pd.Series(lengths[row, 0]*[row])) for row in range(val_df.shape[0])), axis=0)\n    val_df = val_df.groupby(val_df.index).transform(lambda x: x)\n\n    test_raw = pd.DataFrame(test_arr)\n    test_df = pd.DataFrame()\n    test_df['dim_0'] = [pd.Series(test_raw.iloc[x, :])\n                        for x in range(len(test_raw))]\n    lengths = test_df.applymap(lambda x: len(x)).values\n    test_df = pd.concat((pd.DataFrame({col: test_df.loc[row, col] for col in test_df.columns}).reset_index(drop=True).set_index(\n        pd.Series(lengths[row, 0]*[row])) for row in range(test_df.shape[0])), axis=0)\n    test_df = test_df.groupby(test_df.index).transform(lambda x: x)\n\n    return train_df, val_df, test_df\n\n\nif __name__ == '__main__':\n    '''\n    CACHE_PATH = './src/data_cache'\n    shutil.rmtree(CACHE_PATH)\n    os.mkdir(CACHE_PATH)\n\n    parser = argparse.ArgumentParser()\n\n    parser.add_argument('--dataroot', default='/dev_data/zzj/hzy/datasets/UCR', type=str)\n    parser.add_argument('--dataset', default='ArrowHead', type=str)\n\n    args = parser.parse_args()\n\n    sum_dataset, sum_target = load_data(args.dataroot, args.dataset)\n    
print(sum_target)\n    train_datasets, train_targets, val_datasets, val_targets, test_datasets, test_targets = k_fold(sum_dataset, sum_target)\n\n    for i, train_dataset in enumerate(train_datasets):\n        train_target = train_targets[i]\n\n        val_dataset = val_datasets[i]\n        val_target = val_targets[i]\n\n        test_dataset = test_datasets[i]\n        test_target = test_targets[i]\n\n\n    test1 = pd.DataFrame(train_datasets[0])\n    test2 = pd.DataFrame(train_targets[0])\n\n    out = pd.concat([test2, test1], axis=1, ignore_index=True)\n    out.to_csv(os.path.join(CACHE_PATH, 'temp.tsv'), sep='\\t', index=False, header=False)\n    ds, target = sktime.utils.load_data.load_from_ucr_tsv_to_dataframe(os.path.join(CACHE_PATH, 'temp.tsv'), return_separate_X_and_y=True)\n\n    print(ds)\n    print(target)\n    '''\n    sklearn.random.seed(42)\n    sum_dataset, sum_target = load_data(\n        '/dev_data/zzj/hzy/datasets/UCR', 'ArrowHead')\n    skf = model_selection.StratifiedKFold(5, shuffle=True, random_state=42)\n\n    for x, y in skf.split(sum_dataset, sum_target):\n        print(x, y)\n"
  },
  {
    "path": "ts_classification_methods/tst_cls/src/datasets/__init__.py",
    "content": ""
  },
  {
    "path": "ts_classification_methods/tst_cls/src/datasets/data.py",
    "content": "from typing import Optional\nimport os\nfrom multiprocessing import Pool, cpu_count\nimport glob\nimport re\nimport logging\nfrom itertools import repeat, chain\n\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nfrom sktime.utils import load_data\n\nfrom datasets import utils\n\nlogger = logging.getLogger('__main__')\n\n\nclass Normalizer(object):\n    \"\"\"\n    Normalizes dataframe across ALL contained rows (time steps). Different from per-sample normalization.\n    \"\"\"\n\n    def __init__(self, norm_type, mean=None, std=None, min_val=None, max_val=None):\n        \"\"\"\n        Args:\n            norm_type: choose from:\n                \"standardization\", \"minmax\": normalizes dataframe across ALL contained rows (time steps)\n                \"per_sample_std\", \"per_sample_minmax\": normalizes each sample separately (i.e. across only its own rows)\n            mean, std, min_val, max_val: optional (num_feat,) Series of pre-computed values\n        \"\"\"\n\n        self.norm_type = norm_type\n        self.mean = mean\n        self.std = std\n        self.min_val = min_val\n        self.max_val = max_val\n\n    def normalize(self, df):\n        \"\"\"\n        Args:\n            df: input dataframe\n        Returns:\n            df: normalized dataframe\n        \"\"\"\n        if self.norm_type == \"standardization\":\n            if self.mean is None:\n                self.mean = df.mean()\n                self.std = df.std()\n            return (df - self.mean) / (self.std + np.finfo(float).eps)\n\n        elif self.norm_type == \"minmax\":\n            if self.max_val is None:\n                self.max_val = df.max()\n                self.min_val = df.min()\n            return (df - self.min_val) / (self.max_val - self.min_val + np.finfo(float).eps)\n\n        elif self.norm_type == \"per_sample_std\":\n            grouped = df.groupby(by=df.index)\n            return (df - grouped.transform('mean')) / 
grouped.transform('std')\n\n        elif self.norm_type == \"per_sample_minmax\":\n            grouped = df.groupby(by=df.index)\n            min_vals = grouped.transform('min')\n            return (df - min_vals) / (grouped.transform('max') - min_vals + np.finfo(float).eps)\n\n        else:\n            raise (\n                NameError(f'Normalize method \"{self.norm_type}\" not implemented'))\n\n\ndef interpolate_missing(y):\n    \"\"\"\n    Replaces NaN values in pd.Series `y` using linear interpolation\n    \"\"\"\n    if y.isna().any():\n        y = y.interpolate(method='linear', limit_direction='both')\n    return y\n\n\ndef subsample(y, limit=256, factor=2):\n    \"\"\"\n    If a given Series is longer than `limit`, returns subsampled sequence by the specified integer factor\n    \"\"\"\n    if len(y) > limit:\n        return y[::factor].reset_index(drop=True)\n    return y\n\n\nclass BaseData(object):\n\n    def set_num_processes(self, n_proc):\n\n        if (n_proc is None) or (n_proc <= 0):\n            self.n_proc = cpu_count()  # max(1, cpu_count() - 1)\n        else:\n            self.n_proc = min(n_proc, cpu_count())\n\n\nclass HDD_data(BaseData):\n    \"\"\"\n    Dataset class for Hard Drive Disk failure dataset # TODO: INCOMPLETE: does not follow other datasets format\n    Attributes:\n        all_df: (num_samples * seq_len, num_columns) dataframe indexed by integer indices, with multiple rows corresponding to the same index (sample).\n            Each row is a time step; Each column contains either metadata (e.g. 
timestamp) or a feature.\n        all_IDs: (num_samples,) series of IDs contained in `all_df`/`feature_df` (same as all_df.index.unique() )\n    \"\"\"\n\n    def __init__(self, root_dir, file_list=None, pattern=None, n_proc=1, limit_size=None, config=None):\n\n        self.set_num_processes(n_proc=n_proc)\n\n        self.all_df = self.load_all(root_dir)\n        # Sort by serial number and date and index by serial number\n        self.all_df = self.all_df.sort_values(by=['serial_number', 'date'])\n        self.all_df = self.all_df.set_index('serial_number')\n\n        # all asset(disk) IDs (serial numbers)\n        self.all_IDs = self.all_df.index.unique()\n        self.failed_IDs = self.all_df[\n            self.all_df.failure == 1].index.unique()  # IDs corresponding to assets which failed\n        self.normal_IDs = sorted(\n            list(set(self.all_IDs) - set(self.failed_IDs)))  # IDs corresponding to assets without failure\n\n    def load_all(self, dir_path):\n        \"\"\"\n        Loads datasets from all csv files contained in `dir_path` into a dataframe\n        Args:\n            dir_path: directory containing all individual .csv files. 
Corresponds to a Quarter\n\n        Returns:\n        \"\"\"\n        # each file name corresponds to another date\n        input_paths = [os.path.join(dir_path, f) for f in os.listdir(dir_path)\n                       if os.path.isfile(os.path.join(dir_path, f)) and f.endswith('.csv')]\n\n        if self.n_proc > 1:\n            # Load in parallel\n            # no more than file_names needed here\n            _n_proc = min(self.n_proc, len(input_paths))\n            logger.info(\"Loading {} datasets files using {} parallel processes ...\".format(\n                len(input_paths), _n_proc))\n            with Pool(processes=_n_proc) as pool:\n                all_df = pd.concat(pool.map(HDD_data.load_single, input_paths))\n        else:  # read 1 file at a time\n            all_df = pd.concat(HDD_data.load_single(path)\n                               for path in input_paths)\n\n        return all_df\n\n    @staticmethod\n    def load_single(filepath):\n        df = HDD_data.read_data(filepath)\n        df = HDD_data.select_columns(df)\n\n        return df\n\n    @staticmethod\n    def read_data(filepath):\n        \"\"\"Reads a single .csv, which typically contains a day of datasets of various disks.\n        Only Seagate disks are retained.\"\"\"\n        df = pd.read_csv(filepath)\n        # only Seagate models, starting with 'ST', are used\n        return df[df['model'].apply(lambda x: x.startswith('ST'))]\n\n    @staticmethod\n    def select_columns(df):\n        \"\"\"Smart9 is the drive's age in hours\"\"\"\n        df = df.dropna(\n            axis='columns', how='all')  # drop columns containing only NaN\n        keep_cols = [col for col in df.columns if 'normalized' not in col]\n        df = df[keep_cols]\n        return df\n\n    @staticmethod\n    def process_columns(df):\n\n        df['date'] = pd.to_datetime(df['date'])\n        df['failure'] = df['failure'].astype(bool)\n        df[['capacity_bytes', 'model']] = df[[\n            'capacity_bytes', 
'model']].astype('category')\n\n        return df\n\n\nclass WeldData(BaseData):\n    \"\"\"\n    Dataset class for welding dataset.\n    Attributes:\n        all_df: dataframe indexed by ID, with multiple rows corresponding to the same index (sample).\n            Each row is a time step; Each column contains either metadata (e.g. timestamp) or a feature.\n        feature_df: contains the subset of columns of `all_df` which correspond to selected features\n        feature_names: names of columns contained in `feature_df` (same as feature_df.columns)\n        all_IDs: IDs contained in `all_df`/`feature_df` (same as all_df.index.unique() )\n        max_seq_len: maximum sequence (time series) length. If None, script argument `max_seq_len` will be used.\n            (Moreover, script argument overrides this attribute)\n    \"\"\"\n\n    def __init__(self, root_dir, file_list=None, pattern=None, n_proc=1, limit_size=None, config=None):\n\n        self.set_num_processes(n_proc=n_proc)\n\n        self.all_df = self.load_all(\n            root_dir, file_list=file_list, pattern=pattern)\n        self.all_df = self.all_df.sort_values(\n            by=['weld_record_index'])  # datasets is presorted\n        # TODO: There is a single ID that causes the model output to become nan - not clear why\n        # exclude particular ID\n        self.all_df = self.all_df[self.all_df['weld_record_index'] != 920397]\n        self.all_df = self.all_df.set_index('weld_record_index')\n        self.all_IDs = self.all_df.index.unique()  # all sample (session) IDs\n        self.max_seq_len = 66\n        if limit_size is not None:\n            if limit_size > 1:\n                limit_size = int(limit_size)\n            else:  # interpret as proportion if in (0, 1]\n                limit_size = int(limit_size * len(self.all_IDs))\n            self.all_IDs = self.all_IDs[:limit_size]\n            self.all_df = self.all_df.loc[self.all_IDs]\n\n        self.feature_names = ['wire_feed_speed',\n    
                          'current', 'voltage', 'motor_current', 'power']\n        self.feature_df = self.all_df[self.feature_names]\n\n    def load_all(self, root_dir, file_list=None, pattern=None):\n        \"\"\"\n        Loads datasets from csv files contained in `root_dir` into a dataframe, optionally choosing from `pattern`\n        Args:\n            root_dir: directory containing all individual .csv files\n            file_list: optionally, provide a list of file paths within `root_dir` to consider.\n                Otherwise, entire `root_dir` contents will be used.\n            pattern: optionally, apply regex string to select subset of files\n        Returns:\n            all_df: a single (possibly concatenated) dataframe with all data corresponding to specified files\n        \"\"\"\n        # each file name corresponds to another date. Also tools (A, B) and others.\n\n        # Select paths for training and evaluation\n        if file_list is None:\n            data_paths = glob.glob(os.path.join(\n                root_dir, '*'))  # list of all paths\n        else:\n            data_paths = [os.path.join(root_dir, p) for p in file_list]\n        if len(data_paths) == 0:\n            raise Exception('No files found using: {}'.format(\n                os.path.join(root_dir, '*')))\n\n        if pattern is None:\n            # by default evaluate on\n            selected_paths = data_paths\n        else:\n            selected_paths = list(\n                filter(lambda x: re.search(pattern, x), data_paths))\n\n        input_paths = [p for p in selected_paths if os.path.isfile(\n            p) and p.endswith('.csv')]\n        if len(input_paths) == 0:\n            raise Exception(\n                \"No .csv files found using pattern: '{}'\".format(pattern))\n\n        if self.n_proc > 1:\n            # Load in parallel\n            # no more than file_names needed here\n            _n_proc = min(self.n_proc, len(input_paths))\n            
logger.info(\"Loading {} datasets files using {} parallel processes ...\".format(\n                len(input_paths), _n_proc))\n            with Pool(processes=_n_proc) as pool:\n                all_df = pd.concat(pool.map(WeldData.load_single, input_paths))\n        else:  # read 1 file at a time\n            all_df = pd.concat(WeldData.load_single(path)\n                               for path in input_paths)\n\n        return all_df\n\n    @staticmethod\n    def load_single(filepath):\n        df = WeldData.read_data(filepath)\n        df = WeldData.select_columns(df)\n        num_nan = df.isna().sum().sum()\n        if num_nan > 0:\n            logger.warning(\n                \"{} nan values in {} will be replaced by 0\".format(num_nan, filepath))\n            df = df.fillna(0)\n\n        return df\n\n    @staticmethod\n    def read_data(filepath):\n        \"\"\"Reads a single .csv, which typically contains a day of datasets of various weld sessions.\n        \"\"\"\n        df = pd.read_csv(filepath)\n        return df\n\n    @staticmethod\n    def select_columns(df):\n        \"\"\"\"\"\"\n        df = df.rename(columns={\"per_energy\": \"power\"})\n        # Sometimes 'diff_time' is not measured correctly (is 0), and power ('per_energy') becomes infinite\n        is_error = df['power'] > 1e16\n        df.loc[is_error, 'power'] = df.loc[is_error,\n                                           'true_energy'] / df['diff_time'].median()\n\n        df['weld_record_index'] = df['weld_record_index'].astype(int)\n        keep_cols = ['weld_record_index', 'wire_feed_speed',\n                     'current', 'voltage', 'motor_current', 'power']\n        df = df[keep_cols]\n\n        return df\n\n\nclass TSRegressionArchive(BaseData):\n    \"\"\"\n    Dataset class for datasets included in:\n        1) the Time Series Regression Archive (www.timeseriesregression.org), or\n        2) the Time Series Classification Archive (www.timeseriesclassification.com)\n    
Attributes:\n        all_df: (num_samples * seq_len, num_columns) dataframe indexed by integer indices, with multiple rows corresponding to the same index (sample).\n            Each row is a time step; Each column contains either metadata (e.g. timestamp) or a feature.\n        feature_df: (num_samples * seq_len, feat_dim) dataframe; contains the subset of columns of `all_df` which correspond to selected features\n        feature_names: names of columns contained in `feature_df` (same as feature_df.columns)\n        all_IDs: (num_samples,) series of IDs contained in `all_df`/`feature_df` (same as all_df.index.unique() )\n        labels_df: (num_samples, num_labels) pd.DataFrame of label(s) for each sample\n        max_seq_len: maximum sequence (time series) length. If None, script argument `max_seq_len` will be used.\n            (Moreover, script argument overrides this attribute)\n    \"\"\"\n\n    def __init__(self, root_dir, file_list=None, pattern=None, n_proc=1, limit_size=None, config=None):\n\n        # self.set_num_processes(n_proc=n_proc)\n\n        self.config = config\n\n        self.all_df, self.labels_df = self.load_all(\n            root_dir, file_list=file_list, pattern=pattern)\n        # all sample IDs (integer indices 0 ... 
num_samples-1)\n        self.all_IDs = self.all_df.index.unique()\n\n        if limit_size is not None:\n            if limit_size > 1:\n                limit_size = int(limit_size)\n            else:  # interpret as proportion if in (0, 1]\n                limit_size = int(limit_size * len(self.all_IDs))\n            self.all_IDs = self.all_IDs[:limit_size]\n            self.all_df = self.all_df.loc[self.all_IDs]\n\n        # use all features\n        self.feature_names = self.all_df.columns\n        self.feature_df = self.all_df\n\n    def load_all(self, root_dir, file_list=None, pattern=None):\n        \"\"\"\n        Loads datasets from csv files contained in `root_dir` into a dataframe, optionally choosing from `pattern`\n        Args:\n            root_dir: directory containing all individual .csv files\n            file_list: optionally, provide a list of file paths within `root_dir` to consider.\n                Otherwise, entire `root_dir` contents will be used.\n            pattern: optionally, apply regex string to select subset of files\n        Returns:\n            all_df: a single (possibly concatenated) dataframe with all data corresponding to specified files\n            labels_df: dataframe containing label(s) for each sample\n        \"\"\"\n\n        # Select paths for training and evaluation\n        if file_list is None:\n            data_paths = glob.glob(os.path.join(\n                root_dir, '*'))  # list of all paths\n        else:\n            data_paths = [os.path.join(root_dir, p) for p in file_list]\n        if len(data_paths) == 0:\n            raise Exception('No files found using: {}'.format(\n                os.path.join(root_dir, '*')))\n\n        if pattern is None:\n            # by default evaluate on\n            selected_paths = data_paths\n        else:\n            selected_paths = list(\n                filter(lambda x: re.search(pattern, x), data_paths))\n\n        input_paths = [p for p in selected_paths if 
os.path.isfile(\n            p) and p.endswith('.tsv')]\n\n        if len(input_paths) == 0:\n            raise Exception(\n                \"No .tsv files found using pattern: '{}'\".format(pattern))\n\n        df1, label1 = self.load_single(input_paths[0])\n        df2, label2 = self.load_single(input_paths[1])\n\n        all_df = pd.concat([df1, df2], ignore_index=True)\n\n        labels_df = pd.concat([label1, label2], ignore_index=True)\n        # all_df, labels_df = self.load_single(input_paths[0])  # a single file contains dataset\n        lengths = all_df.applymap(lambda x: len(x)).values\n        all_df = pd.concat((pd.DataFrame({col: all_df.loc[row, col] for col in all_df.columns}).reset_index(drop=True).set_index(\n            pd.Series(lengths[row, 0]*[row])) for row in range(all_df.shape[0])), axis=0)\n\n        # Replace NaN values\n\n        grp = all_df.groupby(by=all_df.index)\n        all_df = grp.transform(interpolate_missing)\n\n        # all_df.to_csv('/dev_data/zzj/hzy/pretrained_model/results_on_ucr/all_df.csv')\n        #\n        # (all_df.loc[0])\n        return all_df, labels_df\n\n    def load_single(self, filepath):\n\n        # Every row of the returned df corresponds to a sample;\n        # every column is a pd.Series indexed by timestamp and corresponds to a different dimension (feature)\n        if self.config['task'] == 'regression':\n            df, labels = utils.load_from_tsfile_to_dataframe(\n                filepath, return_separate_X_and_y=True, replace_missing_vals_with='NaN')\n            labels_df = pd.DataFrame(labels, dtype=np.float32)\n        elif self.config['task'] == 'classification':\n            # TODO UCR ver.\n            df, labels = load_data.load_from_ucr_tsv_to_dataframe(\n                filepath, return_separate_X_and_y=True)\n            #df, labels = load_data.load_from_tsfile_to_dataframe(filepath, return_separate_X_and_y=True, replace_missing_vals_with='NaN')\n            labels = pd.Series(labels, 
dtype=\"category\")\n            self.class_names = labels.cat.categories\n            # int8-32 gives an error when using nn.CrossEntropyLoss\n            labels_df = pd.DataFrame(labels.cat.codes, dtype=np.int8)\n        else:  # e.g. imputation, TODO, use uce_tsv\n            try:\n                df, labels = load_data.load_from_ucr_tsv_to_dataframe(filepath, return_separate_X_and_y=True\n                                                                      )\n            except:\n                df, labels = load_data.load_from_ucr_tsv_to_dataframe(\n                    filepath, return_separate_X_and_y=True, replace_missing_vals_with='NaN')\n\n            labels = pd.Series(labels, dtype=\"category\")\n            labels_df = pd.DataFrame(labels.cat.codes, dtype=np.int8)\n\n        # (num_samples, num_dimensions) array containing the length of each series\n        lengths = df.applymap(lambda x: len(x)).values\n        horiz_diffs = np.abs(lengths - np.expand_dims(lengths[:, 0], -1))\n\n        # most general check: len(np.unique(lengths.values)) > 1:  # returns array of unique lengths of sequences\n        if np.sum(horiz_diffs) > 0:  # if any row (sample) has varying length across dimensions\n            logger.warning(\n                \"Not all time series dimensions have same length - will attempt to fix by subsampling first dimension...\")\n            # TODO: this addresses a very specific case (PPGDalia)\n            df = df.applymap(subsample)\n\n        if self.config['subsample_factor']:\n            df = df.applymap(lambda x: subsample(\n                x, limit=0, factor=self.config['subsample_factor']))\n\n        lengths = df.applymap(lambda x: len(x)).values\n        vert_diffs = np.abs(lengths - np.expand_dims(lengths[0, :], 0))\n        if np.sum(vert_diffs) > 0:  # if any column (dimension) has varying length across samples\n            self.max_seq_len = int(np.max(lengths[:, 0]))\n            logger.warning(\"Not all samples have same 
length: maximum length set to {}\".format(\n                self.max_seq_len))\n        else:\n            self.max_seq_len = lengths[0, 0]\n\n        # First create a (seq_len, feat_dim) dataframe for each sample, indexed by a single integer (\"ID\" of the sample)\n        # Then concatenate into a (num_samples * seq_len, feat_dim) dataframe, with multiple rows corresponding to the\n        # sample index (i.e. the same scheme as all datasets in this project)\n        '''\n        df = pd.concat((pd.DataFrame({col: df.loc[row, col] for col in df.columns}).reset_index(drop=True).set_index(\n            pd.Series(lengths[row, 0]*[row])) for row in range(df.shape[0])), axis=0)\n\n        # Replace NaN values\n        grp = df.groupby(by=df.index)\n        df = grp.transform(interpolate_missing)\n        '''\n\n        return df, labels_df\n\n\nclass SemicondTraceData(BaseData):\n    \"\"\"\n    Dataset class for semiconductor manufacturing sensor trace data.\n    Attributes:\n        all_df: (num_samples * seq_len, num_columns) dataframe indexed by integer indices, with multiple rows corresponding to the same index (sample).\n            Each row is a time step; Each column contains either metadata (e.g. timestamp) or a feature.\n        feature_df: (num_samples * seq_len, feat_dim) dataframe; contains the subset of columns of `all_df` which correspond to selected features\n        feature_names: names of columns contained in `feature_df` (same as feature_df.columns)\n        all_IDs: (num_samples,) series of IDs contained in `all_df`/`feature_df` (same as all_df.index.unique() )\n        labels_df: (num_samples, num_labels) pd.DataFrame of label(s) for each sample\n        max_seq_len: maximum sequence (time series) length. If None, script argument `max_seq_len` will be used.\n            (Moreover, script argument overrides this attribute)\n    \"\"\"\n    # TODO: currently all *numeric* features which are not *dataset-wise* constant are kept. 
Sample-wise constants are included\n    features = ['Actual Bias voltage (AT/CH2/RFGen/RFMatch.rMatchBias)', 'Actual Pressure (AT/CH2/PressCtrl.rPress)',\n                'Ampoule wafer count (AT/CH2/Gaspanel/Stick01/BUBBLER.cAmpouleWaferCount)',\n                'Ampoule wafer count (AT/CH2/Gaspanel/Stick05/BUBBLER.cAmpouleWaferCount)',\n                'Backside Flow Reading (AT/CH2/VacChuck.rBacksideFlow)',\n                'Backside Pressure Reading (AT/CH2/VacChuck.rBacksidePress)',\n                'Backside Pressure Setpoint (AT/CH2/VacChuck.wBacksidePressSP)',\n                'Bubbler ampoule accumulated flow (AT/CH2/Gaspanel/Stick01/BUBBLER.cAmpouleLifeAccFlow)',\n                'Bubbler ampoule accumulated flow (AT/CH2/Gaspanel/Stick05/BUBBLER.cAmpouleLifeAccFlow)',\n                'Current Flow (AT/CH2/Gaspanel/Stick01.rFlow)', 'Current Flow (AT/CH2/Gaspanel/Stick01/Mfc.rFlow)',\n                'Current Flow (AT/CH2/Gaspanel/Stick02.rFlow)', 'Current Flow (AT/CH2/Gaspanel/Stick02/Mfc.rFlow)',\n                'Current Flow (AT/CH2/Gaspanel/Stick03.rFlow)', 'Current Flow (AT/CH2/Gaspanel/Stick03/Mfc.rFlow)',\n                'Current Flow (AT/CH2/Gaspanel/Stick05.rFlow)', 'Current Flow (AT/CH2/Gaspanel/Stick05/Mfc.rFlow)',\n                'Current Flow (AT/CH2/Gaspanel/Stick06.rFlow)', 'Current Flow (AT/CH2/Gaspanel/Stick06/Mfc.rFlow)',\n                'Current Flow (AT/CH2/Gaspanel/Stick09.rFlow)', 'Current Flow (AT/CH2/Gaspanel/Stick09/Mfc.rFlow)',\n                'Current Flow (AT/CH2/Gaspanel/Stick21.rFlow)', 'Current Flow (AT/CH2/Gaspanel/Stick21/Mfc.rFlow)',\n                'Current Flow (AT/CH2/Gaspanel/Stick22.rFlow)', 'Current Flow (AT/CH2/Gaspanel/Stick22/Mfc.rFlow)',\n                'Current Position SP Percent (AT/CH2/PressCtrl.rPosSPP)', 'Current Power SP (AT/CH2/RFGen.rPowerSP)',\n                'Current Pressure in PSI (AT/CH2/Gaspanel/Stick01/Transducer.rPressure)',\n                'Current Pressure in PSI 
(AT/CH2/Gaspanel/Stick05/Transducer.rPressure)',\n                'Current Pressure in PSI (AT/CH2/Gaspanel/Stick08/Transducer.rPressure)',\n                'Current Pressure in PSI (AT/CH2/Gaspanel/Stick09/Transducer.rPressure)',\n                'Current Pressure in Torr (AT/CH2/Gaspanel/Stick01/Transducer.rPressureTorr)',\n                'Current Pressure in Torr (AT/CH2/Gaspanel/Stick05/Transducer.rPressureTorr)',\n                'Current Pressure in Torr (AT/CH2/Gaspanel/Stick08/Transducer.rPressureTorr)',\n                'Current Pressure in Torr (AT/CH2/Gaspanel/Stick09/Transducer.rPressureTorr)',\n                'Current Recipe Count (AT/CH2/Clean/Idle Purge.CurRcpCnt)',\n                'Current Recipe Count (AT/CH2/Clean/On Load Clean.CurRcpCnt)',\n                'Current recipe step number (AT/CH2.@RecipeStep01)',\n                'Current servo error  (AT/CH2/TempCtrl/Heater.rOutputCurrServoError)',\n                'Cycle Count (AT/CH2/Gaspanel/Stick01/Service/Cycle Purge By Pressure.cnfCycleCount)',\n                'Cycle Count (AT/CH2/Gaspanel/Stick01/Service/Cycle Purge By Time.cnfCycleCount)',\n                'Cycle Count (AT/CH2/Gaspanel/Stick05/Service/Cycle Purge By Pressure.cnfCycleCount)',\n                'Cycle Count (AT/CH2/Gaspanel/Stick05/Service/Cycle Purge By Time.cnfCycleCount)',\n                'Cycle Count (AT/CH2/Gaspanel/Stick08/Service/Cycle Purge By Pressure.cnfCycleCount)',\n                'Default temperature setpoint (AT/CH2/Watlow1/Ch_1.cDefaultSetpoint)',\n                'Default temperature setpoint (AT/CH2/Watlow1/Ch_2.cDefaultSetpoint)',\n                'Default temperature setpoint (AT/CH2/Watlow1/Ch_6.cDefaultSetpoint)',\n                'Default temperature setpoint (AT/CH2/Watlow2/Ch_4.cDefaultSetpoint)',\n                'Default temperature setpoint (AT/CH2/Watlow2/Ch_5.cDefaultSetpoint)',\n                'Estimated Ampoule wafer count (AT/CH2/Gaspanel/Stick05/BUBBLER.cEstAmpouleWaferCount)',\n           
     'Expected Lid Heater Temperature (AT/CH2/Rcp.wHdrLidHtrTemp)',\n                'Final Leak Check pressure (AT/CH2/Services/CVDLeakCheck/LeakCheck.rLeakCheckFinalPressure)',\n                'Final Leak Rate (AT/CH2/Services/CVDLeakCheck/LeakCheck.rFinalLeakRate)',\n                'Flow Setpoint (AT/CH2/Gaspanel/Stick02/Mfc.wSetpoint)',\n                'Flow Setpoint (AT/CH2/Gaspanel/Stick03/Mfc.wSetpoint)',\n                'Flow Setpoint (AT/CH2/Gaspanel/Stick05/Mfc.wSetpoint)',\n                'Flow Setpoint (AT/CH2/Gaspanel/Stick06/Mfc.wSetpoint)',\n                'Flow Setpoint (AT/CH2/Gaspanel/Stick09/Mfc.wSetpoint)',\n                'Flow Setpoint (AT/CH2/Gaspanel/Stick21/Mfc.wSetpoint)',\n                'Flow Setpoint (AT/CH2/Gaspanel/Stick22/Mfc.wSetpoint)',\n                'Next wafer slot, side 1 (AT/CH2.@NextCassSlot01_01)',\n                'Next wafer src, side 1 (AT/CH2.@NextCassId01_01)', 'Temp Reading  (AT/CH2/Watlow1/Ch_1.rTempReading)',\n                'Temp Reading  (AT/CH2/Watlow1/Ch_2.rTempReading)', 'Temp Reading  (AT/CH2/Watlow1/Ch_3.rTempReading)',\n                'Temp Reading  (AT/CH2/Watlow1/Ch_4.rTempReading)', 'Temp Reading  (AT/CH2/Watlow1/Ch_5.rTempReading)',\n                'Temp Reading  (AT/CH2/Watlow1/Ch_6.rTempReading)', 'Temp Reading  (AT/CH2/Watlow1/Ch_7.rTempReading)',\n                'Temp Reading  (AT/CH2/Watlow1/Ch_8.rTempReading)', 'Temp Reading  (AT/CH2/Watlow2/Ch_1.rTempReading)',\n                'Temp Reading  (AT/CH2/Watlow2/Ch_2.rTempReading)', 'Temp Reading  (AT/CH2/Watlow2/Ch_3.rTempReading)',\n                'Temp Reading  (AT/CH2/Watlow2/Ch_4.rTempReading)', 'Temp Reading  (AT/CH2/Watlow2/Ch_5.rTempReading)',\n                'Temp Reading  (AT/CH2/Watlow2/Ch_6.rTempReading)', 'Temp Reading  (AT/CH2/Watlow2/Ch_8.rTempReading)',\n                'Temp Reading  (AT/CH2/Watlow3/Ch_1.rTempReading)', 'Temp Reading  (AT/CH2/Watlow3/Ch_2.rTempReading)',\n                'Temp Reading  
(AT/CH2/Watlow3/Ch_3.rTempReading)', 'Temp Reading  (AT/CH2/Watlow3/Ch_4.rTempReading)',\n                'Temp Reading  (AT/CH2/Watlow3/Ch_5.rTempReading)', 'Temp Reading  (AT/CH2/Watlow3/Ch_6.rTempReading)',\n                'Temp Reading  (AT/CH2/Watlow3/Ch_7.rTempReading)']\n\n    def __init__(self, root_dir, file_list=None, pattern=None, n_proc=8, limit_size=None, config=None):\n\n        self.set_num_processes(n_proc=n_proc)\n\n        # Get labels\n        wafer_measurements_path = os.path.join(root_dir, \"waferdata/\")\n        logger.info(\"Getting wafer measurements ...\")\n        # Dataframe which holds all measurements: mean thickness (and deposition rate for the subset \"type 1\"), roughness (std of thickness)\n        measurements_df = self.get_measurements(wafer_measurements_path)\n\n        # Get metadata (e.g. mapping between measurement file and trace file)\n        catalog_path = os.path.join(root_dir, \"CTF03.catalog.20200629.csv\")\n        logger.info(\"Getting wafer metadata ...\")\n        # This dataframe holds for all wafers all metadata per wafer (including measurements, when they exist, and corresponding trace file)\n        metadata_df = self.get_metadata(catalog_path, measurements_df)\n\n        # TODO: select subset here (e.g. 
here 20A wafers selected), or set file_list=None to use all\n        files_20A = metadata_df.loc[metadata_df['ChamberRecipeID']\n                                    == 'QUALCH2CO20', 'TraceDataFile']\n        IDs_20A = files_20A.index\n        files_20A = list(map(self.convert_tracefilename, files_20A))\n\n        # Get trace files\n        tracedata_dir = os.path.join(\n            root_dir, \"tracedata/CTF03_CH2_QUALCH2CO_CH2_G0009\")\n        logger.info(\"Getting sensor trace data ...\")\n        self.all_df = self.load_all(\n            tracedata_dir, file_list=files_20A, pattern=pattern)\n\n        self.all_IDs = self.all_df.index.unique()  # all sample (session) IDs\n\n        # TODO: select prediction objective here: any of ['Mean_dep_rate', 'std_thickness', 'mean_thickness']\n        if config['task'] == 'regression':\n            labels_col = config['labels'] if config['labels'] else 'Mean_dep_rate'\n            self.labels_df = pd.DataFrame(\n                metadata_df.loc[self.all_IDs, labels_col], dtype=np.float32)\n            self.labels_df = self.labels_df[~self.labels_df[labels_col].isna()]\n            self.all_IDs = self.labels_df.index\n            self.all_df = self.all_df.loc[self.all_IDs]\n\n        self.max_seq_len = 130  # TODO: for 20A\n\n        if (limit_size is not None) and (limit_size < len(self.all_IDs)):\n            if limit_size > 1:\n                limit_size = int(limit_size)\n            else:  # interpret as proportion if in (0, 1]\n                limit_size = int(limit_size * len(self.all_IDs))\n            self.all_IDs = self.all_IDs[:limit_size]\n            self.all_df = self.all_df.loc[self.all_IDs]\n\n        self.feature_names = SemicondTraceData.features\n        self.feature_df = self.all_df[self.feature_names]\n\n        # Replace NaN values (at this point due to some columns missing from some trace files)\n        if self.feature_df.isna().any().any():\n            self.feature_df = self.feature_df.fillna(0)\n\n  
      return\n\n    def make_pjid(self, toolID, pjID):\n        \"\"\"Convert PJID format of catalog file to the one used in measurement files\"\"\"\n        return toolID + '-' + pjID.split('.')[0]\n\n    def convert_tracefilename(self, filepath):\n        \"\"\"\n        This processing depends on how tracefiles are stored (flat directory hierarchy or not, .csv or .zip)\n        See retrieve_tracefiles.py for options.\n        Here, a flat hierarchy and .csv format is assumed\n        \"\"\"\n        filename, extension = os.path.splitext(os.path.basename(filepath))\n        return filename + '.csv'\n\n    def get_measurements(self, wafer_measurements_path):\n\n        # There are 2 \"types\" of files, the ones that start with \"Rate\" and contain mean deposition rate (type 1)\n        # and the ones that start with \"mCTF\" (type 2),\n        # which only contain mean thickness and fewer columns with different ID and measurement column name.\n        deprate_df1 = self.load_all(\n            wafer_measurements_path, pattern=\"Rate_time_series.*_Average_\", mode='simple')\n        deprate_df1 = deprate_df1.rename(\n            columns={\"Mea_value\": \"mean_thickness\"})\n\n        deprate_df2 = self.load_all(\n            wafer_measurements_path, pattern=r\"/mCTF.*_Average_\", mode='simple')\n        deprate_df2 = deprate_df2.rename(\n            columns={\"Wafer_mean\": \"mean_thickness\"})\n\n        # Merge the 2 types for deposition rate/thickness\n        deprate_df = pd.merge(deprate_df1, deprate_df2, how='outer', left_on=['Proc_cj_id', 'Wafer_id'],\n                              right_on=['Control_job_id', 'Wafer_id'],\n                              left_index=False, right_index=False, sort=True,\n                              suffixes=(None, '_right'), copy=True, indicator=True,\n                              validate=None)\n        # The 2 types contain overlapping sets of wafers, so we need to form a common measurement column\n        right_only = 
deprate_df['mean_thickness'].isnull()\n        deprate_df.loc[right_only,\n                       'mean_thickness'] = deprate_df.loc[right_only, 'mean_thickness_right']\n\n        # Repeat process for roughness (std of thickness) measurement files\n        roughness_df1 = self.load_all(\n            wafer_measurements_path, pattern=\"Rate_time_series.*_StdDev_\", mode='simple')\n        roughness_df1 = roughness_df1.rename(\n            columns={\"Std_dep_thk\": \"std_thickness\"})\n\n        roughness_df2 = self.load_all(\n            wafer_measurements_path, pattern=r\"/mCTF.*_StdDev_\", mode='simple')\n        roughness_df2 = roughness_df2.rename(\n            columns={\"Wafer_std\": \"std_thickness\"})\n\n        roughness_df = pd.merge(roughness_df1, roughness_df2, how='outer', left_on=['Proc_cj_id', 'Wafer_id'],\n                                right_on=['Control_job_id', 'Wafer_id'],\n                                left_index=False, right_index=False, sort=True,\n                                suffixes=(None, '_right'), copy=True, indicator=True,\n                                validate=None)\n\n        right_only = roughness_df['std_thickness'].isnull()\n        roughness_df.loc[right_only,\n                         'std_thickness'] = roughness_df.loc[right_only, 'std_thickness_right']\n\n        # Dataframe which holds all measurements: mean thickness (and deposition rate for the subset \"type 1\"), roughness (std of thickness)\n        measurements_df = pd.merge(deprate_df, roughness_df, how='inner', on=['Proc_cj_id', 'Wafer_id'],\n                                   left_index=False, right_index=False, sort=True,\n                                   suffixes=('_x', '_y'), copy=True, indicator=False,\n                                   validate=None)\n\n        assert sum(measurements_df.mean_thickness.isnull()\n                   ) == 0, \"Missing thickness measurements\"\n        assert sum(measurements_df.std_thickness.isnull()\n                   ) 
== 0, \"Missing roughness measurements\"\n\n        return measurements_df\n\n    def get_metadata(self, catalog_path, measurements_df):\n\n        catalog_df = pd.read_csv(catalog_path)\n        # Restrict to Chamber 2\n        catalog_df = catalog_df[catalog_df['ChamberID'] == 'CH2']\n        # Restrict to the recipes corresponding to existing measurememt wafers and associated product wafers\n        catalog_df = catalog_df[catalog_df['ChamberRecipeID'].isin(\n            ['QUALCH2CO20', 'QUALCH2CO100', 'CH2_G0009'])]\n        catalog_df['pjid'] = catalog_df[['ToolID', 'PJID']].apply(\n            lambda x: self.make_pjid(*x), axis=1)\n\n        # This dataframe holds all metadata per wafer (including measurements, when they exist, and corresponding trace file)\n        metadata_df = pd.merge(catalog_df, measurements_df, how='left', left_on=['pjid', 'WaferID'],\n                               right_on=['Proc_cj_id', 'Wafer_id'],\n                               left_index=False, right_index=False, sort=True,\n                               suffixes=('_x', '_y'), copy=True, indicator=False,\n                               validate=None)\n        metadata_df = metadata_df.set_index('WaferPassID')\n\n        return metadata_df\n\n    def load_all(self, root_dir, file_list=None, pattern=None, mode=None):\n        \"\"\"\n        Loads datasets from csv files contained in `root_dir` into a dataframe, optionally choosing from `pattern`\n        Args:\n            root_dir: directory containing all individual .csv files\n            file_list: optionally, provide a list of file paths within `root_dir` to consider.\n                Otherwise, entire `root_dir` contents will be used.\n            pattern: optionally, apply regex string to select subset of files\n            func: function to use for loading a single file\n        Returns:\n            all_df: a single (possibly concatenated) dataframe with all data corresponding to specified files\n        \"\"\"\n\n       
 # if func is None:\n        #     func = SemicondTraceData.load_single\n\n        # Select paths for training and evaluation\n        if file_list is None:\n            data_paths = glob.glob(os.path.join(\n                root_dir, '*'))  # list of all paths\n        else:\n            data_paths = [os.path.join(root_dir, p) for p in file_list]\n        if len(data_paths) == 0:\n            raise Exception('No files found using: {}'.format(\n                os.path.join(root_dir, '*')))\n\n        if pattern is None:\n            # by default evaluate on\n            selected_paths = data_paths\n        else:\n            selected_paths = list(\n                filter(lambda x: re.search(pattern, x), data_paths))\n\n        input_paths = [p for p in selected_paths if os.path.isfile(\n            p) and p.endswith('.csv')]\n        if len(input_paths) == 0:\n            raise Exception(\n                \"No .csv files found using pattern: '{}'\".format(pattern))\n\n        if (mode != 'simple') and (self.n_proc > 1):\n            # Load in parallel\n            # no more than file_names needed here\n            _n_proc = min(self.n_proc, len(input_paths))\n            logger.info(\"Loading {} datasets files using {} parallel processes ...\".format(\n                len(input_paths), _n_proc))\n            with Pool(processes=_n_proc) as pool:\n                # done like this because multiprocessing needs the *explicit* function call\n                # and not a reference to a function, e.g. 
func = pd.read_csv\n                all_df = pd.concat(\n                    pool.map(SemicondTraceData.load_single, input_paths))\n        else:  # read 1 file at a time\n            if mode == 'simple':\n                all_df = pd.concat(pd.read_csv(path)\n                                   for path in tqdm(input_paths))\n            else:\n                all_df = pd.concat(SemicondTraceData.load_single(path)\n                                   for path in tqdm(input_paths))\n\n        return all_df\n\n    @staticmethod\n    def load_single(filepath):\n        df = SemicondTraceData.read_data(filepath)\n        df = SemicondTraceData.select_columns(df)\n\n        df['TimeStamp'] = pd.to_datetime(df['TimeStamp'])\n        df = df.sort_values(by=['WaferPassID', 'TimeStamp'])\n        df = df.set_index('WaferPassID')\n\n        # Replace NaN values (at this point, these are missing values in a variable/sequence)\n        # because some columns are missing in some tracefiles\n        feat_col = [\n            col for col in df.columns if col in SemicondTraceData.features]\n        if df[feat_col].isna().any().any():\n            grp = df.groupby(by=df.index)\n            df.loc[:, feat_col] = grp.transform(interpolate_missing)\n\n        return df\n\n    @staticmethod\n    def read_data(filepath):\n        \"\"\"Reads a single .csv, which typically contains a day of datasets of various weld sessions.\n        \"\"\"\n        df = pd.read_csv(filepath)\n        return df\n\n    @staticmethod\n    def select_columns(df):\n\n        # Kept just as an example\n        # df = df.rename(columns={\"per_energy\": \"power\"})\n        # # Sometimes 'diff_time' is not measured correctly (is 0), and power ('per_energy') becomes infinite\n        # is_error = df['power'] > 1e16\n        # df.loc[is_error, 'power'] = df.loc[is_error, 'true_energy'] / df['diff_time'].median()\n        # keep_cols = ['weld_record_index', 'wire_feed_speed', 'current', 'voltage', 'motor_current', 
'power']\n        # df = df[keep_cols]\n\n        # This doesn't work because some columns are missing in some tracefiles\n        # keep_cols = ['WaferPassID', 'TimeStamp'] + SemicondTraceData.features\n        # df = df[keep_cols]\n\n        return df\n\n\nclass PMUData(BaseData):\n    \"\"\"\n    Dataset class for Phasor Measurement Unit dataset.\n    Attributes:\n        all_df: dataframe indexed by ID, with multiple rows corresponding to the same index (sample).\n            Each row is a time step; Each column contains either metadata (e.g. timestamp) or a feature.\n        feature_df: contains the subset of columns of `all_df` which correspond to selected features\n        feature_names: names of columns contained in `feature_df` (same as feature_df.columns)\n        all_IDs: IDs contained in `all_df`/`feature_df` (same as all_df.index.unique() )\n        max_seq_len: maximum sequence (time series) length (optional). Used only if script argument `max_seq_len` is not\n            defined.\n    \"\"\"\n\n    def __init__(self, root_dir, file_list=None, pattern=None, n_proc=1, limit_size=None, config=None):\n\n        self.set_num_processes(n_proc=n_proc)\n\n        self.all_df = self.load_all(\n            root_dir, file_list=file_list, pattern=pattern)\n\n        if config['data_window_len'] is not None:\n            self.max_seq_len = config['data_window_len']\n            # construct sample IDs: 0, 0, ..., 0, 1, 1, ..., 1, 2, ..., (num_whole_samples - 1)\n            # num_whole_samples = len(self.all_df) // self.max_seq_len  # commented code is for more general IDs\n            # IDs = list(chain.from_iterable(map(lambda x: repeat(x, self.max_seq_len), range(num_whole_samples + 1))))\n            # IDs = IDs[:len(self.all_df)]  # either last sample is completely superfluous, or it has to be shortened\n            IDs = [i // self.max_seq_len for i in range(self.all_df.shape[0])]\n            self.all_df.insert(loc=0, column='ExID', value=IDs)\n        
else:\n            # self.all_df = self.all_df.sort_values(by=['ExID'])  # dataset is presorted\n            self.max_seq_len = 30\n\n        self.all_df = self.all_df.set_index('ExID')\n        # rename columns\n        self.all_df.columns = [re.sub(r'\\d+', str(i//3), col_name)\n                               for i, col_name in enumerate(self.all_df.columns[:])]\n        #self.all_df.columns = [\"_\".join(col_name.split(\" \")[:-1]) for col_name in self.all_df.columns[:]]\n        self.all_IDs = self.all_df.index.unique()  # all sample (session) IDs\n\n        if limit_size is not None:\n            if limit_size > 1:\n                limit_size = int(limit_size)\n            else:  # interpret as proportion if in (0, 1]\n                limit_size = int(limit_size * len(self.all_IDs))\n            self.all_IDs = self.all_IDs[:limit_size]\n            self.all_df = self.all_df.loc[self.all_IDs]\n\n        self.feature_names = self.all_df.columns  # all columns are used as features\n        self.feature_df = self.all_df[self.feature_names]\n\n    def load_all(self, root_dir, file_list=None, pattern=None):\n        \"\"\"\n        Loads datasets from csv files contained in `root_dir` into a dataframe, optionally choosing from `pattern`\n        Args:\n            root_dir: directory containing all individual .csv files\n            file_list: optionally, provide a list of file paths within `root_dir` to consider.\n                Otherwise, entire `root_dir` contents will be used.\n            pattern: optionally, apply regex string to select subset of files\n        Returns:\n            all_df: a single (possibly concatenated) dataframe with all data corresponding to specified files\n        \"\"\"\n\n        # Select paths for training and evaluation\n        if file_list is None:\n            data_paths = glob.glob(os.path.join(\n                root_dir, '*'))  # list of all paths\n        else:\n            data_paths = [os.path.join(root_dir, p) for p in 
file_list]\n        if len(data_paths) == 0:\n            raise Exception('No files found using: {}'.format(\n                os.path.join(root_dir, '*')))\n\n        if pattern is None:\n            # by default evaluate on\n            selected_paths = data_paths\n        else:\n            selected_paths = list(\n                filter(lambda x: re.search(pattern, x), data_paths))\n\n        input_paths = [p for p in selected_paths if os.path.isfile(\n            p) and p.endswith('.csv')]\n        if len(input_paths) == 0:\n            raise Exception(\n                \"No .csv files found using pattern: '{}'\".format(pattern))\n\n        if self.n_proc > 1:\n            # Load in parallel\n            # no more than file_names needed here\n            _n_proc = min(self.n_proc, len(input_paths))\n            logger.info(\"Loading {} datasets files using {} parallel processes ...\".format(\n                len(input_paths), _n_proc))\n            with Pool(processes=_n_proc) as pool:\n                all_df = pd.concat(pool.map(PMUData.load_single, input_paths))\n        else:  # read 1 file at a time\n            all_df = pd.concat(PMUData.load_single(path)\n                               for path in input_paths)\n\n        return all_df\n\n    @staticmethod\n    def load_single(filepath):\n        df = PMUData.read_data(filepath)\n        #df = PMUData.select_columns(df)\n        num_nan = df.isna().sum().sum()\n        if num_nan > 0:\n            logger.warning(\n                \"{} nan values in {} will be replaced by 0\".format(num_nan, filepath))\n            df = df.fillna(0)\n\n        return df\n\n    @staticmethod\n    def read_data(filepath):\n        \"\"\"Reads a single .csv, which typically contains a day of datasets of various weld sessions.\n        \"\"\"\n        df = pd.read_csv(filepath)\n        return df\n\n\ndata_factory = {'weld': WeldData,\n                'hdd': HDD_data,\n                'tsra': TSRegressionArchive,\n               
 'semicond': SemicondTraceData,\n                'pmu': PMUData}\n"
  },
  {
    "path": "ts_classification_methods/tst_cls/src/datasets/dataset.py",
    "content": "import numpy as np\nfrom torch.utils.data import Dataset\nimport torch\n\n\nclass ImputationDataset(Dataset):\n    \"\"\"Dynamically computes missingness (noise) mask for each sample\"\"\"\n\n    def __init__(self, data, indices, mean_mask_length=3, masking_ratio=0.15,\n                 mode='separate', distribution='geometric', exclude_feats=None, device=None, feature_df=None):\n        super(ImputationDataset, self).__init__()\n\n        self.data = data  # this is a subclass of the BaseData class in data.py\n        self.IDs = indices  # list of data IDs, but also mapping between integer index and ID\n        if feature_df is not None:\n            self.feature_df = feature_df\n        else:\n            self.feature_df = self.data.feature_df.loc[self.IDs]\n\n        self.masking_ratio = masking_ratio\n        self.mean_mask_length = mean_mask_length\n        self.mode = mode\n        self.distribution = distribution\n        self.exclude_feats = exclude_feats\n        self.device = device\n\n    def __getitem__(self, ind):\n        \"\"\"\n        For a given integer index, returns the corresponding (seq_length, feat_dim) array and a noise mask of same shape\n        Args:\n            ind: integer index of sample in dataset\n        Returns:\n            X: (seq_length, feat_dim) tensor of the multivariate time series corresponding to a sample\n            mask: (seq_length, feat_dim) boolean tensor: 0s mask and predict, 1s: unaffected input\n            ID: ID of sample\n        \"\"\"\n\n        # X = self.feature_df.loc[self.IDs[ind]].values  # (seq_length, feat_dim) array\n        X = self.feature_df.loc[ind].values\n        mask = noise_mask(X, self.masking_ratio, self.mean_mask_length, self.mode, self.distribution,\n                          self.exclude_feats)  # (seq_length, feat_dim) boolean array\n\n        return torch.from_numpy(X), torch.from_numpy(mask), self.IDs[ind]\n\n    def update(self):\n        self.mean_mask_length = 
min(20, self.mean_mask_length + 1)\n        self.masking_ratio = min(1, self.masking_ratio + 0.05)\n\n    def __len__(self):\n        return len(self.IDs)\n\n\nclass TransductionDataset(Dataset):\n\n    def __init__(self, data, indices, mask_feats, start_hint=0.0, end_hint=0.0):\n        super(TransductionDataset, self).__init__()\n\n        self.data = data  # this is a subclass of the BaseData class in data.py\n        self.IDs = indices  # list of data IDs, but also mapping between integer index and ID\n        self.feature_df = self.data.feature_df.loc[self.IDs]\n\n        # list/array of indices corresponding to features to be masked\n        self.mask_feats = mask_feats\n        # proportion at beginning of time series which will not be masked\n        self.start_hint = start_hint\n        # end_hint: proportion at the end of time series which will not be masked\n        self.end_hint = end_hint\n\n    def __getitem__(self, ind):\n        \"\"\"\n        For a given integer index, returns the corresponding (seq_length, feat_dim) array and a noise mask of same shape\n        Args:\n            ind: integer index of sample in dataset\n        Returns:\n            X: (seq_length, feat_dim) tensor of the multivariate time series corresponding to a sample\n            mask: (seq_length, feat_dim) boolean tensor: 0s mask and predict, 1s: unaffected input\n            ID: ID of sample\n        \"\"\"\n\n        # (seq_length, feat_dim) array\n        X = self.feature_df.loc[self.IDs[ind]].values\n        mask = transduct_mask(X, self.mask_feats, self.start_hint,\n                              self.end_hint)  # (seq_length, feat_dim) boolean array\n\n        return torch.from_numpy(X), torch.from_numpy(mask), self.IDs[ind]\n\n    def update(self):\n        self.start_hint = max(0, self.start_hint - 0.1)\n        self.end_hint = max(0, self.end_hint - 0.1)\n\n    def __len__(self):\n        return len(self.IDs)\n\n\ndef collate_superv(data, max_len=None, 
device=None):\n    \"\"\"Build mini-batch tensors from a list of (X, mask) tuples. Mask input. Create\n    Args:\n        data: len(batch_size) list of tuples (X, y).\n            - X: torch tensor of shape (seq_length, feat_dim); variable seq_length.\n            - y: torch tensor of shape (num_labels,) : class indices or numerical targets\n                (for classification or regression, respectively). num_labels > 1 for multi-task models\n        max_len: global fixed sequence length. Used for architectures requiring fixed length input,\n            where the batch length cannot vary dynamically. Longer sequences are clipped, shorter are padded with 0s\n    Returns:\n        X: (batch_size, padded_length, feat_dim) torch tensor of masked features (input)\n        targets: (batch_size, padded_length, feat_dim) torch tensor of unmasked features (output)\n        target_masks: (batch_size, padded_length, feat_dim) boolean torch tensor\n            0 indicates masked values to be predicted, 1 indicates unaffected/\"active\" feature values\n        padding_masks: (batch_size, padded_length) boolean tensor, 1 means keep vector at this position, 0 means padding\n    \"\"\"\n\n    batch_size = len(data)\n    features, labels, IDs = zip(*data)  # origin: , IDs\n\n    # Stack and pad features and masks (convert 2D to 3D tensors, i.e. 
add batch dimension)\n    # original sequence length for each time series\n    lengths = [X.shape[0] for X in features]\n    if max_len is None:\n        max_len = max(lengths)\n    # (batch_size, padded_length, feat_dim)\n    X = torch.zeros(batch_size, max_len, features[0].shape[-1])\n    for i in range(batch_size):\n        end = min(lengths[i], max_len)\n        X[i, :end, :] = features[i][:end, :]\n\n    targets = torch.stack(labels, dim=0)  # (batch_size, num_labels)\n\n    padding_masks = padding_mask(torch.tensor(lengths, dtype=torch.int16),\n                                 max_len=max_len)  # (batch_size, padded_length) boolean tensor, \"1\" means keep\n\n    return X, targets, padding_masks, IDs\n\n\nclass ClassiregressionDataset(Dataset):\n\n    def __init__(self, data, indices, device=None, feature_df=None):\n        super(ClassiregressionDataset, self).__init__()\n\n        self.data = data  # this is a subclass of the BaseData class in data.py\n        self.IDs = indices  # list of data IDs, but also mapping between integer index and ID\n        if feature_df is None:\n            self.feature_df = self.data.feature_df.loc[self.IDs]\n        else:\n            self.feature_df = feature_df\n\n        self.labels_df = self.data.labels_df.loc[self.IDs]\n        self.device = device\n        num_data = len(self.IDs)\n        '''\n        self.flatten_x = torch.from_numpy(np.array(self.feature_df).reshape((num_data, -1))).to(self.device)\n        self.flatten_y = torch.from_numpy(np.array(self.labels_df.loc[indices]).reshape((num_data,))).to(self.device)\n        '''\n\n    def __getitem__(self, ind):\n        \"\"\"\n        For a given integer index, returns the corresponding (seq_length, feat_dim) array and a noise mask of same shape\n        Args:\n            ind: integer index of sample in dataset\n        Returns:\n            X: (seq_length, feat_dim) tensor of the multivariate time series corresponding to a sample\n            y: (num_labels,) 
tensor of labels (num_labels > 1 for multi-task models) for each sample\n            ID: ID of sample\n        \"\"\"\n\n        # X = self.feature_df.loc[self.IDs[ind]].values  # (seq_length, feat_dim) array\n        X = self.feature_df.loc[ind].values\n        y = self.labels_df.loc[self.IDs[ind]].values  # (num_labels,) array\n\n        return torch.from_numpy(X), torch.from_numpy(y), self.IDs[ind]\n\n        # return self.flatten_x[ind], self.flatten_y[ind]\n\n    def __len__(self):\n        return len(self.IDs)\n\n\ndef transduct_mask(X, mask_feats, start_hint=0.0, end_hint=0.0):\n    \"\"\"\n    Creates a boolean mask of the same shape as X, with 0s at places where a feature should be masked.\n    Args:\n        X: (seq_length, feat_dim) numpy array of features corresponding to a single sample\n        mask_feats: list/array of indices corresponding to features to be masked\n        start_hint:\n        end_hint: proportion at the end of time series which will not be masked\n\n    Returns:\n        boolean numpy array with the same shape as X, with 0s at places where a feature should be masked\n    \"\"\"\n\n    mask = np.ones(X.shape, dtype=bool)\n    start_ind = int(start_hint * X.shape[0])\n    end_ind = max(start_ind, int((1 - end_hint) * X.shape[0]))\n    mask[start_ind:end_ind, mask_feats] = 0\n\n    return mask\n\n\ndef compensate_masking(X, mask):\n    \"\"\"\n    Compensate feature vectors after masking values, in a way that the matrix product W @ X would not be affected on average.\n    If p is the proportion of unmasked (active) elements, X' = X / p = X * feat_dim/num_active\n    Args:\n        X: (batch_size, seq_length, feat_dim) torch tensor\n        mask: (batch_size, seq_length, feat_dim) torch tensor: 0s means mask and predict, 1s: unaffected (active) input\n    Returns:\n        (batch_size, seq_length, feat_dim) compensated features\n    \"\"\"\n\n    # number of unmasked elements of feature vector for each time step\n    # (batch_size, 
seq_length, 1)\n    num_active = torch.sum(mask, dim=-1).unsqueeze(-1)\n    # to avoid division by 0, set the minimum to 1\n    num_active = torch.max(num_active, torch.ones(\n        num_active.shape, dtype=torch.int16))  # (batch_size, seq_length, 1)\n    return X.shape[-1] * X / num_active\n\n\ndef collate_unsuperv(data, max_len=None, mask_compensation=False):\n    \"\"\"Build mini-batch tensors from a list of (X, mask) tuples. Mask input. Create\n    Args:\n        data: len(batch_size) list of tuples (X, mask).\n            - X: torch tensor of shape (seq_length, feat_dim); variable seq_length.\n            - mask: boolean torch tensor of shape (seq_length, feat_dim); variable seq_length.\n        max_len: global fixed sequence length. Used for architectures requiring fixed length input,\n            where the batch length cannot vary dynamically. Longer sequences are clipped, shorter are padded with 0s\n    Returns:\n        X: (batch_size, padded_length, feat_dim) torch tensor of masked features (input)\n        targets: (batch_size, padded_length, feat_dim) torch tensor of unmasked features (output)\n        target_masks: (batch_size, padded_length, feat_dim) boolean torch tensor\n            0 indicates masked values to be predicted, 1 indicates unaffected/\"active\" feature values\n        padding_masks: (batch_size, padded_length) boolean tensor, 1 means keep vector at this position, 0 ignore (padding)\n    \"\"\"\n\n    batch_size = len(data)\n    features, masks, IDs = zip(*data)\n\n    # Stack and pad features and masks (convert 2D to 3D tensors, i.e. 
add batch dimension)\n    # original sequence length for each time series\n    lengths = [X.shape[0] for X in features]\n    if max_len is None:\n        max_len = max(lengths)\n    # (batch_size, padded_length, feat_dim)\n    X = torch.zeros(batch_size, max_len, features[0].shape[-1])\n    target_masks = torch.zeros_like(X,\n                                    dtype=torch.bool)  # (batch_size, padded_length, feat_dim) masks related to objective\n    for i in range(batch_size):\n        end = min(lengths[i], max_len)\n        X[i, :end, :] = features[i][:end, :]\n        target_masks[i, :end, :] = masks[i][:end, :]\n\n    targets = X.clone()\n    X = X * target_masks  # mask input\n    if mask_compensation:\n        X = compensate_masking(X, target_masks)\n\n    # (batch_size, padded_length) boolean tensor, \"1\" means keep\n    padding_masks = padding_mask(torch.tensor(\n        lengths, dtype=torch.int16), max_len=max_len)\n    target_masks = ~target_masks  # inverse logic: 0 now means ignore, 1 means predict\n    return X, targets, target_masks, padding_masks, IDs\n\n\ndef noise_mask(X, masking_ratio, lm=3, mode='separate', distribution='geometric', exclude_feats=None):\n    \"\"\"\n    Creates a random boolean mask of the same shape as X, with 0s at places where a feature should be masked.\n    Args:\n        X: (seq_length, feat_dim) numpy array of features corresponding to a single sample\n        masking_ratio: proportion of seq_length to be masked. At each time step, will also be the proportion of\n            feat_dim that will be masked on average\n        lm: average length of masking subsequences (streaks of 0s). 
Used only when `distribution` is 'geometric'.\n        mode: whether each variable should be masked separately ('separate'), or all variables at certain positions\n            should be masked concurrently ('concurrent')\n        distribution: whether each mask sequence element is sampled independently at random, or whether\n            sampling follows a markov chain (and thus is stateful), resulting in geometric distributions of\n            masked sequences of a desired mean length `lm`\n        exclude_feats: iterable of indices corresponding to features to be excluded from masking (i.e. to remain all 1s)\n\n    Returns:\n        boolean numpy array with the same shape as X, with 0s at places where a feature should be masked\n    \"\"\"\n    if exclude_feats is not None:\n        exclude_feats = set(exclude_feats)\n\n    if distribution == 'geometric':  # stateful (Markov chain)\n        if mode == 'separate':  # each variable (feature) is independent\n            mask = np.ones(X.shape, dtype=bool)\n            for m in range(X.shape[1]):  # feature dimension\n                if exclude_feats is None or m not in exclude_feats:\n                    mask[:, m] = geom_noise_mask_single(\n                        X.shape[0], lm, masking_ratio)  # time dimension\n        # replicate across feature dimension (mask all variables at the same positions concurrently)\n        else:\n            mask = np.tile(np.expand_dims(geom_noise_mask_single(\n                X.shape[0], lm, masking_ratio), 1), X.shape[1])\n    else:  # each position is independent Bernoulli with p = 1 - masking_ratio\n        if mode == 'separate':\n            mask = np.random.choice(np.array([True, False]), size=X.shape, replace=True,\n                                    p=(1 - masking_ratio, masking_ratio))\n        else:\n            mask = np.tile(np.random.choice(np.array([True, False]), size=(X.shape[0], 1), replace=True,\n                                            p=(1 - masking_ratio, 
masking_ratio)), X.shape[1])\n\n    return mask\n\n\ndef geom_noise_mask_single(L, lm, masking_ratio):\n    \"\"\"\n    Randomly create a boolean mask of length `L`, consisting of subsequences of average length lm, masking with 0s a `masking_ratio`\n    proportion of the sequence L. The length of masking subsequences and intervals follow a geometric distribution.\n    Args:\n        L: length of mask and sequence to be masked\n        lm: average length of masking subsequences (streaks of 0s)\n        masking_ratio: proportion of L to be masked\n\n    Returns:\n        (L,) boolean numpy array intended to mask ('drop') with 0s a sequence of length L\n    \"\"\"\n    keep_mask = np.ones(L, dtype=bool)\n    # probability of each masking sequence stopping. parameter of geometric distribution.\n    p_m = 1 / lm\n    # probability of each unmasked sequence stopping. parameter of geometric distribution.\n    p_u = p_m * masking_ratio / (1 - masking_ratio)\n    p = [p_m, p_u]\n\n    # Start in state 0 with masking_ratio probability\n    # state 0 means masking, 1 means not masking\n    state = int(np.random.rand() > masking_ratio)\n    for i in range(L):\n        # here it happens that state and masking value corresponding to state are identical\n        keep_mask[i] = state\n        if np.random.rand() < p[state]:\n            state = 1 - state\n\n    return keep_mask\n\n\ndef padding_mask(lengths, max_len=None):\n    \"\"\"\n    Used to mask padded positions: creates a (batch_size, max_len) boolean mask from a tensor of sequence lengths,\n    where 1 means keep element at this position (time step)\n    \"\"\"\n    batch_size = lengths.numel()\n    # trick works because of overloading of 'or' operator for non-boolean types\n    max_len = max_len or lengths.max_val()\n    return (torch.arange(0, max_len, device=lengths.device)\n            .type_as(lengths)\n            .repeat(batch_size, 1)\n            .lt(lengths.unsqueeze(1)))\n"
  },
  {
    "path": "ts_classification_methods/tst_cls/src/datasets/datasplit.py",
    "content": "import numpy as np\nfrom sklearn import model_selection\n\n\ndef split_dataset(data_indices, validation_method, n_splits, validation_ratio, test_set_ratio=0,\n                  test_indices=None,\n                  random_seed=42, labels=None, ith=None):\n    \"\"\"\n    Splits dataset (i.e. the global datasets indices) into a test set and a training/validation set.\n    The training/validation set is used to produce `n_splits` different configurations/splits of indices.\n\n    Returns:\n        test_indices: numpy array containing the global datasets indices corresponding to the test set\n            (empty if test_set_ratio is 0 or None)\n        train_indices: iterable of `n_splits` (num. of folds) numpy arrays,\n            each array containing the global datasets indices corresponding to a fold's training set\n        val_indices: iterable of `n_splits` (num. of folds) numpy arrays,\n            each array containing the global datasets indices corresponding to a fold's validation set\n    \"\"\"\n\n    # Set aside test set, if explicitly defined\n    '''\n    if test_indices is not None:\n        data_indices = np.array([ind for ind in data_indices if ind not in set(test_indices)])  # to keep initial order\n    '''\n    datasplitter = DataSplitter.factory(\n        validation_method, data_indices, labels, ith)  # DataSplitter object\n\n    # Set aside a random partition of all data as a test set\n\n    '''\n    if test_indices is None:\n        #if test_set_ratio:  # only if test set not explicitly defined\n        datasplitter.split_testset(test_ratio=test_set_ratio, random_state=random_seed)\n        test_indices = datasplitter.test_indices\n    else:\n        test_indices = []\n    '''\n\n    # Split train / validation sets\n    # TODO directly split the test set, ignore the val set\n    datasplitter.split_testset(\n        test_ratio=test_set_ratio, random_state=random_seed)\n    datasplitter.split_validation(\n        n_splits, 
validation_ratio, random_state=random_seed)\n\n    return datasplitter.train_indices, datasplitter.val_indices, datasplitter.test_indices\n\n\nclass DataSplitter(object):\n    \"\"\"Factory class, constructing subclasses based on feature type\"\"\"\n\n    def __init__(self, data_indices, data_labels=None, ith=None):\n        \"\"\"data_indices = train_val_indices | test_indices\"\"\"\n\n        self.data_indices = data_indices  # global datasets indices\n        self.data_labels = data_labels  # global raw datasets labels\n        # global non-test indices (training and validation)\n        self.train_val_indices = np.copy(self.data_indices)\n        self.test_indices = []  # global test indices\n        self.ith = ith\n        if data_labels is not None:\n            self.train_val_labels = np.copy(\n                self.data_labels)  # global non-test labels (includes training and validation)\n            self.test_labels = []  # global test labels # TODO: maybe not needed\n\n    @staticmethod\n    def factory(split_type, *args, **kwargs):\n        if split_type == \"StratifiedShuffleSplit\":\n            return StratifiedShuffleSplitter(*args, **kwargs)\n        if split_type == \"ShuffleSplit\":\n            return ShuffleSplitter(*args, **kwargs)\n        if split_type == \"StratifiedKFold\":\n            return StratifiedKFoldSplitter(*args, **kwargs)\n        else:\n            raise ValueError(\n                \"DataSplitter for '{}' does not exist\".format(split_type))\n\n    def split_testset(self, test_ratio, random_state=1337):\n        \"\"\"\n        Input:\n            test_ratio: ratio of test set with respect to the entire dataset. 
Should result in an absolute number of\n                samples which is greater or equal to the number of classes\n        Returns:\n            test_indices: numpy array containing the global datasets indices corresponding to the test set\n            test_labels: numpy array containing the labels corresponding to the test set\n        \"\"\"\n\n        raise NotImplementedError(\"Please override function in child class\")\n\n    def split_validation(self):\n        \"\"\"\n        Returns:\n            train_indices: iterable of n_splits (num. of folds) numpy arrays,\n                each array containing the global datasets indices corresponding to a fold's training set\n            val_indices: iterable of n_splits (num. of folds) numpy arrays,\n                each array containing the global datasets indices corresponding to a fold's validation set\n        \"\"\"\n\n        raise NotImplementedError(\"Please override function in child class\")\n\n# TODO add a k-fold, 在splitdataset里加一个index，每次运行取数据都会保证取到第index份\n# 1.测试设置random_state是否能保证每次的shuffle结果一致；\n# 2.在argparser中加入一个index参数，即每个数据集用5个脚本训练\n# 3.写完StratifiedKFold并在splitdataset里加index\n# 4. 
ith starts from 0\n\n\n'''\n    remove the ith \n'''\n\n\nclass StratifiedKFoldSplitter(DataSplitter):\n    def split_testset(self, test_ratio, random_state=42):\n        splitter = model_selection.StratifiedKFold(\n            n_splits=5, shuffle=True, random_state=random_state)\n        train_val_indices = None\n        test_indices = None\n        for i, (raw, test) in enumerate(splitter.split(X=np.zeros(len(self.data_indices)), y=self.data_labels)):\n            train_val_indices = np.array(raw, dtype=np.int64)\n            test_indices = np.array(test, dtype=np.int64)\n\n            if self.ith == i:\n                break\n\n        self.data_labels = np.array(self.data_labels)\n        self.data_indices = np.array(self.data_indices)\n        train_val_indices = np.array(train_val_indices)\n        test_indices = np.array(test_indices)\n\n        self.train_val_indices, self.train_val_labels = self.data_indices[\n            train_val_indices], self.data_labels[train_val_indices]\n        self.test_indices, self.test_labels = self.data_indices[\n            test_indices], self.data_labels[test_indices]\n\n        return\n\n    def split_validation(self, n_splits, validation_ratio, random_state=42):\n        splitter = model_selection.StratifiedKFold(\n            n_splits=4, shuffle=True, random_state=random_state)\n\n        '''\n        train_indices, val_indices = next(splitter.split(X=np.zeros(len(self.train_val_labels)), y=self.train_val_labels))\n\n        train_indices = np.array(train_indices, dtype=np.int64)\n        val_indices = np.array(val_indices, dtype=np.int64)\n        self.train_indices = self.train_val_indices[train_indices] \n        self.val_indices = self.train_val_indices[val_indices]\n        '''\n        train_indices, val_indices = zip(\n            *splitter.split(X=np.zeros(len(self.train_val_labels)), y=self.train_val_labels))\n        # return global datasets indices per fold\n        #print(train_indices[0].shape, 
train_indices[1].shape, train_indices[2].shape, train_indices[3].shape)\n\n        self.train_indices = self.train_val_indices[train_indices[0]]\n        self.val_indices = self.train_val_indices[val_indices[0]]\n        return\n\n\nclass StratifiedShuffleSplitter(DataSplitter):\n    \"\"\"\n    Returns randomized shuffled folds, which preserve the class proportions of samples in each fold. Differs from k-fold\n    in that not all samples are evaluated, and samples may be shared across validation sets,\n    which becomes more probable proportionally to validation_ratio/n_splits.\n    \"\"\"\n\n    def split_testset(self, test_ratio, random_state=1337):\n        \"\"\"\n        Input:\n            test_ratio: ratio of test set with respect to the entire dataset. Should result in an absolute number of\n                samples which is greater or equal to the number of classes\n        Returns:\n            test_indices: numpy array containing the global datasets indices corresponding to the test set\n            test_labels: numpy array containing the labels corresponding to the test set\n        \"\"\"\n\n        splitter = model_selection.StratifiedShuffleSplit(\n            n_splits=1, test_size=test_ratio, random_state=random_state)\n        # get local indices, i.e. 
indices in [0, len(data_labels))\n        train_val_indices, test_indices = next(splitter.split(\n            X=np.zeros(len(self.data_indices)), y=self.data_labels))\n        # return global datasets indices and labels\n        self.train_val_indices, self.train_val_labels = self.data_indices[\n            train_val_indices], self.data_labels[train_val_indices]\n        self.test_indices, self.test_labels = self.data_indices[\n            test_indices], self.data_labels[test_indices]\n\n        return\n\n    def split_validation(self, n_splits, validation_ratio, random_state=1337):\n        \"\"\"\n        Input:\n            n_splits: number of different, randomized and independent from one-another folds\n            validation_ratio: ratio of validation set with respect to the entire dataset. Should result in an absolute number of\n                samples which is greater or equal to the number of classes\n        Returns:\n            train_indices: iterable of n_splits (num. of folds) numpy arrays,\n                each array containing the global datasets indices corresponding to a fold's training set\n            val_indices: iterable of n_splits (num. of folds) numpy arrays,\n                each array containing the global datasets indices corresponding to a fold's validation set\n        \"\"\"\n\n        splitter = model_selection.StratifiedShuffleSplit(n_splits=n_splits, test_size=validation_ratio,\n                                                          random_state=random_state)\n        # get local indices, i.e. 
indices in [0, len(train_val_labels)), per fold\n        train_indices, val_indices = zip(\n            *splitter.split(X=np.zeros(len(self.train_val_labels)), y=self.train_val_labels))\n        # return global datasets indices per fold\n        self.train_indices = [self.train_val_indices[fold_indices]\n                              for fold_indices in train_indices]\n        self.val_indices = [self.train_val_indices[fold_indices]\n                            for fold_indices in val_indices]\n\n        return\n\n\nclass ShuffleSplitter(DataSplitter):\n    \"\"\"\n    Returns randomized shuffled folds without requiring or taking into account the sample labels. Differs from k-fold\n    in that not all samples are evaluated, and samples may be shared across validation sets,\n    which becomes more probable proportionally to validation_ratio/n_splits.\n    \"\"\"\n\n    def split_testset(self, test_ratio, random_state=1337):\n        \"\"\"\n        Input:\n            test_ratio: ratio of test set with respect to the entire dataset. Should result in an absolute number of\n                samples which is greater or equal to the number of classes\n        Returns:\n            test_indices: numpy array containing the global datasets indices corresponding to the test set\n            test_labels: numpy array containing the labels corresponding to the test set\n        \"\"\"\n\n        splitter = model_selection.ShuffleSplit(\n            n_splits=1, test_size=test_ratio, random_state=random_state)\n        # get local indices, i.e. 
indices in [0, len(data_indices))\n        train_val_indices, test_indices = next(\n            splitter.split(X=np.zeros(len(self.data_indices))))\n        # return global datasets indices and labels\n        self.train_val_indices = self.data_indices[train_val_indices]\n        self.test_indices = self.data_indices[test_indices]\n        if self.data_labels is not None:\n            self.train_val_labels = self.data_labels[train_val_indices]\n            self.test_labels = self.data_labels[test_indices]\n\n        return\n\n    def split_validation(self, n_splits, validation_ratio, random_state=1337):\n        \"\"\"\n        Input:\n            n_splits: number of different, randomized and independent from one-another folds\n            validation_ratio: ratio of validation set with respect to the entire dataset. Should result in an absolute number of\n                samples which is greater or equal to the number of classes\n        Returns:\n            train_indices: iterable of n_splits (num. of folds) numpy arrays,\n                each array containing the global datasets indices corresponding to a fold's training set\n            val_indices: iterable of n_splits (num. of folds) numpy arrays,\n                each array containing the global datasets indices corresponding to a fold's validation set\n        \"\"\"\n\n        splitter = model_selection.ShuffleSplit(n_splits=n_splits, test_size=validation_ratio,\n                                                random_state=random_state)\n        # get local indices, i.e. 
indices in [0, len(train_val_labels)), per fold\n        train_indices, val_indices = zip(\n            *splitter.split(X=np.zeros(len(self.train_val_indices))))\n        # return global datasets indices per fold\n        self.train_indices = [self.train_val_indices[fold_indices]\n                              for fold_indices in train_indices]\n        self.val_indices = [self.train_val_indices[fold_indices]\n                            for fold_indices in val_indices]\n\n        return\n"
  },
  {
    "path": "ts_classification_methods/tst_cls/src/datasets/utils.py",
    "content": "\"\"\"\nCode to load Time Series Regression datasets. From:\nhttps://github.com/ChangWeiTan/TSRegression/blob/master/utils\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\nfrom tqdm import tqdm\n\nregression_datasets = [\"AustraliaRainfall\",\n                       \"HouseholdPowerConsumption1\",\n                       \"HouseholdPowerConsumption2\",\n                       \"BeijingPM25Quality\",\n                       \"BeijingPM10Quality\",\n                       \"Covid3Month\",\n                       \"LiveFuelMoistureContent\",\n                       \"FloodModeling1\",\n                       \"FloodModeling2\",\n                       \"FloodModeling3\",\n                       \"AppliancesEnergy\",\n                       \"BenzeneConcentration\",\n                       \"NewsHeadlineSentiment\",\n                       \"NewsTitleSentiment\",\n                       \"BIDMC32RR\",\n                       \"BIDMC32HR\",\n                       \"BIDMC32SpO2\",\n                       \"IEEEPPG\",\n                       \"PPGDalia\"]\n\n\ndef uniform_scaling(data, max_len):\n    \"\"\"\n    This is a function to scale the time series uniformly\n    :param data:\n    :param max_len:\n    :return:\n    \"\"\"\n    seq_len = len(data)\n    scaled_data = [data[int(j * seq_len / max_len)] for j in range(max_len)]\n\n    return scaled_data\n\n\n# The following code is adapted from the python package sktime to read .ts file.\nclass TsFileParseException(Exception):\n    \"\"\"\n    Should be raised when parsing a .ts file and the format is incorrect.\n    \"\"\"\n    pass\n\n\ndef load_from_tsfile_to_dataframe(full_file_path_and_name, return_separate_X_and_y=True,\n                                  replace_missing_vals_with='NaN'):\n    \"\"\"Loads data from a .ts file into a Pandas DataFrame.\n\n    Parameters\n    ----------\n    full_file_path_and_name: str\n        The 
full pathname of the .ts file to read.\n    return_separate_X_and_y: bool\n        true if X and Y values should be returned as separate Data Frames (X) and a numpy array (y), false otherwise.\n        This is only relevant for data that\n    replace_missing_vals_with: str\n       The value that missing values in the text file should be replaced with prior to parsing.\n\n    Returns\n    -------\n    DataFrame, ndarray\n        If return_separate_X_and_y then a tuple containing a DataFrame and a numpy array containing the relevant time-series and corresponding class values.\n    DataFrame\n        If not return_separate_X_and_y then a single DataFrame containing all time-series and (if relevant) a column \"class_vals\" the associated class values.\n    \"\"\"\n\n    # Initialize flags and variables used when parsing the file\n    metadata_started = False\n    data_started = False\n\n    has_problem_name_tag = False\n    has_timestamps_tag = False\n    has_univariate_tag = False\n    has_class_labels_tag = False\n    has_target_labels_tag = False\n    has_data_tag = False\n\n    previous_timestamp_was_float = None\n    previous_timestamp_was_int = None\n    previous_timestamp_was_timestamp = None\n    num_dimensions = None\n    is_first_case = True\n    instance_list = []\n    class_val_list = []\n    line_num = 0\n\n    # Parse the file\n    # print(full_file_path_and_name)\n    with open(full_file_path_and_name, 'r', encoding='utf-8') as file:\n        for line in tqdm(file):\n            # print(\".\", end='')\n            # Strip white space from start/end of line and change to lowercase for use below\n            line = line.strip().lower()\n            # Empty lines are valid at any point in a file\n            if line:\n                # Check if this line contains metadata\n                # Please note that even though metadata is stored in this function it is not currently published externally\n                if line.startswith(\"@problemname\"):\n        
            # Check that the data has not started\n                    if data_started:\n                        raise TsFileParseException(\"metadata must come before data\")\n                    # Check that the associated value is valid\n                    tokens = line.split(' ')\n                    token_len = len(tokens)\n\n                    if token_len == 1:\n                        raise TsFileParseException(\"problemname tag requires an associated value\")\n\n                    problem_name = line[len(\"@problemname\") + 1:]\n                    has_problem_name_tag = True\n                    metadata_started = True\n                elif line.startswith(\"@timestamps\"):\n                    # Check that the data has not started\n                    if data_started:\n                        raise TsFileParseException(\"metadata must come before data\")\n\n                    # Check that the associated value is valid\n                    tokens = line.split(' ')\n                    token_len = len(tokens)\n\n                    if token_len != 2:\n                        raise TsFileParseException(\"timestamps tag requires an associated Boolean value\")\n                    elif tokens[1] == \"true\":\n                        timestamps = True\n                    elif tokens[1] == \"false\":\n                        timestamps = False\n                    else:\n                        raise TsFileParseException(\"invalid timestamps value\")\n                    has_timestamps_tag = True\n                    metadata_started = True\n                elif line.startswith(\"@univariate\"):\n                    # Check that the data has not started\n                    if data_started:\n                        raise TsFileParseException(\"metadata must come before data\")\n\n                    # Check that the associated value is valid\n                    tokens = line.split(' ')\n                    token_len = len(tokens)\n                    if 
token_len != 2:\n                        raise TsFileParseException(\"univariate tag requires an associated Boolean value\")\n                    elif tokens[1] == \"true\":\n                        univariate = True\n                    elif tokens[1] == \"false\":\n                        univariate = False\n                    else:\n                        raise TsFileParseException(\"invalid univariate value\")\n\n                    has_univariate_tag = True\n                    metadata_started = True\n                elif line.startswith(\"@classlabel\"):\n                    # Check that the data has not started\n                    if data_started:\n                        raise TsFileParseException(\"metadata must come before data\")\n\n                    # Check that the associated value is valid\n                    tokens = line.split(' ')\n                    token_len = len(tokens)\n\n                    if token_len == 1:\n                        raise TsFileParseException(\"classlabel tag requires an associated Boolean value\")\n\n                    if tokens[1] == \"true\":\n                        class_labels = True\n                    elif tokens[1] == \"false\":\n                        class_labels = False\n                    else:\n                        raise TsFileParseException(\"invalid classLabel value\")\n\n                    # Check if we have any associated class values\n                    if token_len == 2 and class_labels:\n                        raise TsFileParseException(\"if the classlabel tag is true then class values must be supplied\")\n\n                    has_class_labels_tag = True\n                    class_label_list = [token.strip() for token in tokens[2:]]\n                    metadata_started = True\n                elif line.startswith(\"@targetlabel\"):\n                    # Check that the data has not started\n                    if data_started:\n                        raise 
TsFileParseException(\"metadata must come before data\")\n\n                    # Check that the associated value is valid\n                    tokens = line.split(' ')\n                    token_len = len(tokens)\n\n                    if token_len == 1:\n                        raise TsFileParseException(\"targetlabel tag requires an associated Boolean value\")\n\n                    if tokens[1] == \"true\":\n                        target_labels = True\n                    elif tokens[1] == \"false\":\n                        target_labels = False\n                    else:\n                        raise TsFileParseException(\"invalid targetLabel value\")\n\n                    has_target_labels_tag = True\n                    class_val_list = []\n                    metadata_started = True\n                # Check if this line contains the start of data\n                elif line.startswith(\"@data\"):\n                    if line != \"@data\":\n                        raise TsFileParseException(\"data tag should not have an associated value\")\n\n                    if data_started and not metadata_started:\n                        raise TsFileParseException(\"metadata must come before data\")\n                    else:\n                        has_data_tag = True\n                        data_started = True\n                # If the 'data tag has been found then metadata has been parsed and data can be loaded\n                elif data_started:\n                    # Check that a full set of metadata has been provided\n                    incomplete_regression_meta_data = not has_problem_name_tag or not has_timestamps_tag or not has_univariate_tag or not has_target_labels_tag or not has_data_tag\n                    incomplete_classification_meta_data = not has_problem_name_tag or not has_timestamps_tag or not has_univariate_tag or not has_class_labels_tag or not has_data_tag\n                    if incomplete_regression_meta_data and 
incomplete_classification_meta_data:\n                        raise TsFileParseException(\"a full set of metadata has not been provided before the data\")\n\n                    # Replace any missing values with the value specified\n                    line = line.replace(\"?\", replace_missing_vals_with)\n\n                    # Check if we dealing with data that has timestamps\n                    if timestamps:\n                        # We're dealing with timestamps so cannot just split line on ':' as timestamps may contain one\n                        has_another_value = False\n                        has_another_dimension = False\n\n                        timestamps_for_dimension = []\n                        values_for_dimension = []\n\n                        this_line_num_dimensions = 0\n                        line_len = len(line)\n                        char_num = 0\n\n                        while char_num < line_len:\n                            # Move through any spaces\n                            while char_num < line_len and str.isspace(line[char_num]):\n                                char_num += 1\n\n                            # See if there is any more data to read in or if we should validate that read thus far\n\n                            if char_num < line_len:\n\n                                # See if we have an empty dimension (i.e. 
no values)\n                                if line[char_num] == \":\":\n                                    if len(instance_list) < (this_line_num_dimensions + 1):\n                                        instance_list.append([])\n\n                                    instance_list[this_line_num_dimensions].append(pd.Series())\n                                    this_line_num_dimensions += 1\n\n                                    has_another_value = False\n                                    has_another_dimension = True\n\n                                    timestamps_for_dimension = []\n                                    values_for_dimension = []\n\n                                    char_num += 1\n                                else:\n                                    # Check if we have reached a class label\n                                    if line[char_num] != \"(\" and target_labels:\n                                        class_val = line[char_num:].strip()\n\n                                        # if class_val not in class_val_list:\n                                        #     raise TsFileParseException(\n                                        #         \"the class value '\" + class_val + \"' on line \" + str(\n                                        #             line_num + 1) + \" is not valid\")\n\n                                        class_val_list.append(float(class_val))\n                                        char_num = line_len\n\n                                        has_another_value = False\n                                        has_another_dimension = False\n\n                                        timestamps_for_dimension = []\n                                        values_for_dimension = []\n\n                                    else:\n\n                                        # Read in the data contained within the next tuple\n\n                                        if line[char_num] != \"(\" and not 
target_labels:\n                                            raise TsFileParseException(\n                                                \"dimension \" + str(this_line_num_dimensions + 1) + \" on line \" + str(\n                                                    line_num + 1) + \" does not start with a '('\")\n\n                                        char_num += 1\n                                        tuple_data = \"\"\n\n                                        while char_num < line_len and line[char_num] != \")\":\n                                            tuple_data += line[char_num]\n                                            char_num += 1\n\n                                        if char_num >= line_len or line[char_num] != \")\":\n                                            raise TsFileParseException(\n                                                \"dimension \" + str(this_line_num_dimensions + 1) + \" on line \" + str(\n                                                    line_num + 1) + \" does not end with a ')'\")\n\n                                        # Read in any spaces immediately after the current tuple\n\n                                        char_num += 1\n\n                                        while char_num < line_len and str.isspace(line[char_num]):\n                                            char_num += 1\n\n                                        # Check if there is another value or dimension to process after this tuple\n\n                                        if char_num >= line_len:\n                                            has_another_value = False\n                                            has_another_dimension = False\n\n                                        elif line[char_num] == \",\":\n                                            has_another_value = True\n                                            has_another_dimension = False\n\n                                        elif line[char_num] == \":\":\n         
                                   has_another_value = False\n                                            has_another_dimension = True\n\n                                        char_num += 1\n\n                                        # Get the numeric value for the tuple by reading from the end of the tuple data backwards to the last comma\n\n                                        last_comma_index = tuple_data.rfind(',')\n\n                                        if last_comma_index == -1:\n                                            raise TsFileParseException(\n                                                \"dimension \" + str(this_line_num_dimensions + 1) + \" on line \" + str(\n                                                    line_num + 1) + \" contains a tuple that has no comma inside of it\")\n\n                                        try:\n                                            value = tuple_data[last_comma_index + 1:]\n                                            value = float(value)\n\n                                        except ValueError:\n                                            raise TsFileParseException(\n                                                \"dimension \" + str(this_line_num_dimensions + 1) + \" on line \" + str(\n                                                    line_num + 1) + \" contains a tuple that does not have a valid numeric value\")\n\n                                        # Check the type of timestamp that we have\n\n                                        timestamp = tuple_data[0: last_comma_index]\n\n                                        try:\n                                            timestamp = int(timestamp)\n                                            timestamp_is_int = True\n                                            timestamp_is_timestamp = False\n                                        except ValueError:\n                                            timestamp_is_int = False\n\n                     
                   if not timestamp_is_int:\n                                            try:\n                                                timestamp = float(timestamp)\n                                                timestamp_is_float = True\n                                                timestamp_is_timestamp = False\n                                            except ValueError:\n                                                timestamp_is_float = False\n\n                                        if not timestamp_is_int and not timestamp_is_float:\n                                            try:\n                                                timestamp = timestamp.strip()\n                                                timestamp_is_timestamp = True\n                                            except ValueError:\n                                                timestamp_is_timestamp = False\n\n                                        # Make sure that the timestamps in the file (not just this dimension or case) are consistent\n\n                                        if not timestamp_is_timestamp and not timestamp_is_int and not timestamp_is_float:\n                                            raise TsFileParseException(\n                                                \"dimension \" + str(this_line_num_dimensions + 1) + \" on line \" + str(\n                                                    line_num + 1) + \" contains a tuple that has an invalid timestamp '\" + timestamp + \"'\")\n\n                                        if previous_timestamp_was_float is not None and previous_timestamp_was_float and not timestamp_is_float:\n                                            raise TsFileParseException(\n                                                \"dimension \" + str(this_line_num_dimensions + 1) + \" on line \" + str(\n                                                    line_num + 1) + \" contains tuples where the timestamp format is inconsistent\")\n\n   
                                     if previous_timestamp_was_int is not None and previous_timestamp_was_int and not timestamp_is_int:\n                                            raise TsFileParseException(\n                                                \"dimension \" + str(this_line_num_dimensions + 1) + \" on line \" + str(\n                                                    line_num + 1) + \" contains tuples where the timestamp format is inconsistent\")\n\n                                        if previous_timestamp_was_timestamp is not None and previous_timestamp_was_timestamp and not timestamp_is_timestamp:\n                                            raise TsFileParseException(\n                                                \"dimension \" + str(this_line_num_dimensions + 1) + \" on line \" + str(\n                                                    line_num + 1) + \" contains tuples where the timestamp format is inconsistent\")\n\n                                        # Store the values\n\n                                        timestamps_for_dimension += [timestamp]\n                                        values_for_dimension += [value]\n\n                                        #  If this was our first tuple then we store the type of timestamp we had\n\n                                        if previous_timestamp_was_timestamp is None and timestamp_is_timestamp:\n                                            previous_timestamp_was_timestamp = True\n                                            previous_timestamp_was_int = False\n                                            previous_timestamp_was_float = False\n\n                                        if previous_timestamp_was_int is None and timestamp_is_int:\n                                            previous_timestamp_was_timestamp = False\n                                            previous_timestamp_was_int = True\n                                            previous_timestamp_was_float = 
False\n\n                                        if previous_timestamp_was_float is None and timestamp_is_float:\n                                            previous_timestamp_was_timestamp = False\n                                            previous_timestamp_was_int = False\n                                            previous_timestamp_was_float = True\n\n                                        # See if we should add the data for this dimension\n\n                                        if not has_another_value:\n                                            if len(instance_list) < (this_line_num_dimensions + 1):\n                                                instance_list.append([])\n\n                                            if timestamp_is_timestamp:\n                                                timestamps_for_dimension = pd.DatetimeIndex(timestamps_for_dimension)\n\n                                            instance_list[this_line_num_dimensions].append(\n                                                pd.Series(index=timestamps_for_dimension, data=values_for_dimension))\n                                            this_line_num_dimensions += 1\n\n                                            timestamps_for_dimension = []\n                                            values_for_dimension = []\n\n                            elif has_another_value:\n                                raise TsFileParseException(\n                                    \"dimension \" + str(this_line_num_dimensions + 1) + \" on line \" + str(\n                                        line_num + 1) + \" ends with a ',' that is not followed by another tuple\")\n\n                            elif has_another_dimension and target_labels:\n                                raise TsFileParseException(\n                                    \"dimension \" + str(this_line_num_dimensions + 1) + \" on line \" + str(\n                                        line_num + 1) + \" ends with a ':' 
while it should list a class value\")\n\n                            elif has_another_dimension and not target_labels:\n                                if len(instance_list) < (this_line_num_dimensions + 1):\n                                    instance_list.append([])\n\n                                instance_list[this_line_num_dimensions].append(pd.Series(dtype=np.float32))\n                                this_line_num_dimensions += 1\n                                num_dimensions = this_line_num_dimensions\n\n                            # If this is the 1st line of data we have seen then note the dimensions\n\n                            if not has_another_value and not has_another_dimension:\n                                if num_dimensions is None:\n                                    num_dimensions = this_line_num_dimensions\n\n                                if num_dimensions != this_line_num_dimensions:\n                                    raise TsFileParseException(\"line \" + str(\n                                        line_num + 1) + \" does not have the same number of dimensions as the previous line of data\")\n\n                        # Check that we are not expecting some more data, and if not, store that processed above\n\n                        if has_another_value:\n                            raise TsFileParseException(\n                                \"dimension \" + str(this_line_num_dimensions + 1) + \" on line \" + str(\n                                    line_num + 1) + \" ends with a ',' that is not followed by another tuple\")\n\n                        elif has_another_dimension and target_labels:\n                            raise TsFileParseException(\n                                \"dimension \" + str(this_line_num_dimensions + 1) + \" on line \" + str(\n                                    line_num + 1) + \" ends with a ':' while it should list a class value\")\n\n                        elif has_another_dimension and not 
target_labels:\n                            if len(instance_list) < (this_line_num_dimensions + 1):\n                                instance_list.append([])\n\n                            instance_list[this_line_num_dimensions].append(pd.Series())\n                            this_line_num_dimensions += 1\n                            num_dimensions = this_line_num_dimensions\n\n                        # If this is the 1st line of data we have seen then note the dimensions\n\n                        if not has_another_value and num_dimensions != this_line_num_dimensions:\n                            raise TsFileParseException(\"line \" + str(\n                                line_num + 1) + \" does not have the same number of dimensions as the previous line of data\")\n\n                        # Check if we should have class values, and if so that they are contained in those listed in the metadata\n\n                        if target_labels and len(class_val_list) == 0:\n                            raise TsFileParseException(\"the cases have no associated class values\")\n                    else:\n                        dimensions = line.split(\":\")\n                        # If first row then note the number of dimensions (that must be the same for all cases)\n                        if is_first_case:\n                            num_dimensions = len(dimensions)\n\n                            if target_labels:\n                                num_dimensions -= 1\n\n                            for dim in range(0, num_dimensions):\n                                instance_list.append([])\n                            is_first_case = False\n\n                        # See how many dimensions that the case whose data in represented in this line has\n                        this_line_num_dimensions = len(dimensions)\n\n                        if target_labels:\n                            this_line_num_dimensions -= 1\n\n                        # All dimensions 
should be included for all series, even if they are empty\n                        if this_line_num_dimensions != num_dimensions:\n                            raise TsFileParseException(\"inconsistent number of dimensions. Expecting \" + str(\n                                num_dimensions) + \" but have read \" + str(this_line_num_dimensions))\n\n                        # Process the data for each dimension\n                        for dim in range(0, num_dimensions):\n                            dimension = dimensions[dim].strip()\n\n                            if dimension:\n                                data_series = dimension.split(\",\")\n                                data_series = [float(i) for i in data_series]\n                                instance_list[dim].append(pd.Series(data_series))\n                            else:\n                                instance_list[dim].append(pd.Series())\n\n                        if target_labels:\n                            class_val_list.append(float(dimensions[num_dimensions].strip()))\n\n            line_num += 1\n\n    # Check that the file was not empty\n    if line_num:\n        # Check that the file contained both metadata and data\n        complete_regression_meta_data = has_problem_name_tag and has_timestamps_tag and has_univariate_tag and has_target_labels_tag and has_data_tag\n        complete_classification_meta_data = has_problem_name_tag and has_timestamps_tag and has_univariate_tag and has_class_labels_tag and has_data_tag\n\n        if metadata_started and not complete_regression_meta_data and not complete_classification_meta_data:\n            raise TsFileParseException(\"metadata incomplete\")\n        elif metadata_started and not data_started:\n            raise TsFileParseException(\"file contained metadata but no data\")\n        elif metadata_started and data_started and len(instance_list) == 0:\n            raise TsFileParseException(\"file contained metadata but no data\")\n\n       
 # Create a DataFrame from the data parsed above\n        data = pd.DataFrame(dtype=np.float32)\n\n        for dim in range(0, num_dimensions):\n            data['dim_' + str(dim)] = instance_list[dim]\n\n        # Check if we should return any associated class labels separately\n\n        if target_labels:\n            if return_separate_X_and_y:\n                return data, np.asarray(class_val_list)\n            else:\n                data['class_vals'] = pd.Series(class_val_list)\n                return data\n        else:\n            return data\n    else:\n        raise TsFileParseException(\"empty file\")\n\n\ndef process_data(X, min_len, normalise=None):\n    \"\"\"\n    This is a function to process the data, i.e. convert dataframe to numpy array\n    :param X:\n    :param min_len:\n    :param normalise:\n    :return:\n    \"\"\"\n    tmp = []\n    for i in tqdm(range(len(X))):\n        _x = X.iloc[i, :].copy(deep=True)\n\n        # 1. find the maximum length of each dimension\n        all_len = [len(y) for y in _x]\n        max_len = max(all_len)\n\n        # 2. adjust the length of each dimension\n        _y = []\n        for y in _x:\n            # 2.1 fill missing values\n            if y.isnull().any():\n                y = y.interpolate(method='linear', limit_direction='both')\n\n            # 2.2. if length of each dimension is different, uniformly scale the shorter ones to the max length\n            if len(y) < max_len:\n                y = uniform_scaling(y, max_len)\n            _y.append(y)\n        _y = np.array(np.transpose(_y))\n\n        # 3. adjust the length of the series, chop of the longer series\n        _y = _y[:min_len, :]\n\n        # 4. 
normalise the series\n        if normalise == \"standard\":\n            scaler = StandardScaler().fit(_y)\n            _y = scaler.transform(_y)\n        if normalise == \"minmax\":\n            scaler = MinMaxScaler().fit(_y)\n            _y = scaler.transform(_y)\n\n        tmp.append(_y)\n    X = np.array(tmp)\n    return X"
  },
  {
    "path": "ts_classification_methods/tst_cls/src/main.py",
    "content": "\"\"\"\nWritten by George Zerveas\nModified by Ziyang Huang\n\nIf you use any part of the code in this repository, please consider citing the following paper:\nGeorge Zerveas et al. A Transformer-based Framework for Multivariate Time Series Representation Learning, in\nProceedings of the 27th ACM SIGKDD Conference on Knowledge Discovery and Data Mining (KDD '21), August 14--18, 2021\n\"\"\"\n\nfrom models.loss import NoFussCrossEntropyLoss, MaskedMSELoss\nfrom dataprepare import *\nfrom optimizers import get_optimizer\nfrom models.loss import get_loss_module\nfrom models.ts_transformer import model_factory\nfrom datasets.datasplit import split_dataset\nfrom datasets.data import data_factory, Normalizer\nfrom utils import utils\nfrom running import setup, pipeline_factory, validate, check_progress, NEG_METRICS\nfrom options import Options\nimport numpy as np\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torch import nn\nfrom torch.utils.data import DataLoader\nimport torch\nfrom tqdm import tqdm\nimport pandas as pd\nimport json\nimport pickle\nimport time\nimport sys\nimport os\nfrom copy import deepcopy\nimport logging\n\nlogging.basicConfig(\n    format='%(asctime)s | %(levelname)s : %(message)s', level=logging.INFO)\nlogger = logging.getLogger(__name__)\nlogger.info(\"Loading packages ...\")\n# 3rd party packages\n# Project modules\n\n\ndef main(config):\n\n    total_epoch_time = 0\n    total_eval_time = 0\n\n    total_start_time = time.time()\n\n    # Add file logging besides stdout\n    file_handler = logging.FileHandler(\n        os.path.join(config['output_dir'], 'output.log'))\n    logger.addHandler(file_handler)\n\n    logger.info('Running:\\n{}\\n'.format(\n        ' '.join(sys.argv)))  # command used to run\n\n    if config['seed'] is not None:\n        torch.manual_seed(config['seed'])\n\n    if config['multi_gpu']:\n        device_ids = [0, 1]\n    device = torch.device('cuda:{}'.format(config['gpu']) if (\n        
torch.cuda.is_available() and config['gpu'] != '-1') else 'cpu')\n    logger.info(\"Using device: {}\".format(device))\n    if device == 'cuda':\n        logger.info(\"Device index: {}\".format(torch.cuda.current_device()))\n\n    # Build data\n    logger.info(\"Loading and preprocessing data ...\")\n    data_class = data_factory[config['data_class']]\n    my_data = data_class(config['data_dir'], pattern=config['pattern'],\n                         n_proc=config['n_proc'], limit_size=config['limit_size'], config=config)\n    feat_dim = my_data.feature_df.shape[1]  # dimensionality of data features\n    validation_method = 'StratifiedKFold'\n    labels = my_data.labels_df.values.flatten()\n    # Split dataset\n    test_data = my_data\n    # will be converted to empty list in `split_dataset`, if also test_set_ratio == 0\n    test_indices = None\n    val_data = my_data\n    val_indices = []\n\n    # Note: currently a validation set must exist, either with `val_pattern` or `val_ratio`\n    # Using a `val_pattern` means that `val_ratio` == 0 and `test_ratio` == 0\n\n    # 5 fold\n    accus = []\n    times = []\n    end_epochs = []\n    for i in range(5):\n        fold_start_time = time.time()\n        train_indices, val_indices, test_indices = split_dataset(data_indices=my_data.all_IDs,\n                                                                 validation_method=validation_method,\n                                                                 n_splits=1,\n                                                                 validation_ratio=config['val_ratio'],\n                                                                 # used only if test_indices not explicitly specified\n                                                                 test_set_ratio=config['test_ratio'],\n                                                                 test_indices=test_indices,\n                                                                 random_seed=42,\n              
                                                   labels=labels, ith=i)\n        logger.info('{} fold start training!'.format(i))\n        logger.info(\"{} samples may be used for training\".format(\n            len(train_indices)))\n        logger.info(\n            \"{} samples will be used for validation\".format(len(val_indices)))\n        logger.info(\"{} samples will be used for testing\".format(\n            len(test_indices)))\n\n        # Create model\n        logger.info(\"Creating model ...\")\n        if config['task'] == 'pretrain_and_finetune':\n            model, classifier = model_factory(config, my_data, labels)\n        else:\n            model = model_factory(config, my_data)\n\n        if config['global_reg']:\n            weight_decay = config['l2_reg']\n            output_reg = None\n        else:\n            weight_decay = 0\n            output_reg = config['l2_reg']\n\n        optim_class = get_optimizer(config['optimizer'])\n        optimizer = optim_class(\n            model.parameters(), lr=config['lr'], weight_decay=weight_decay)\n\n        start_epoch = 0\n        lr_step = 0  # current step index of `lr_step`\n        lr = config['lr']  # current learning step\n        # Load model and optimizer state\n\n        if config['multi_gpu']:\n            model = nn.DataParallel(model, device_ids)\n            optimizer = nn.DataParallel(optimizer, device_ids)\n\n            if config['task'] == 'pretrain_and_finetune':\n                classifier = nn.DataParallel(classifier, device_ids)\n\n        model.to(device)\n        if config['task'] == 'pretrain_and_finetune':\n            classifier.to(device)\n        elif config['task'] == 'classification':\n            if config['load_root'] is not None:\n                model.load_state_dict(torch.load(os.path.join(\n                    config['load_root'], config['source_dataset'], 'encoder_weights.pt'), device))\n            classifier = model\n\n        loss_module = 
MaskedMSELoss(reduction='none')\n        classification_module = NoFussCrossEntropyLoss(reduction='none')\n\n        if config['task'] == 'classification':\n            loss_module = classification_module\n        '''\n        if config['multi_gpu']:\n            loss_module = nn.DataParallel(loss_module, device_ids)\n        '''\n        # Initialize data generators\n        if config['task'] == 'pretrain_and_finetune':\n            dataset_class, collate_fn, runner_class, cls_data_class, cls_collate_fn, cls_runner_cls = pipeline_factory(\n                config, device)\n        else:\n            dataset_class, collate_fn, runner_class = pipeline_factory(\n                config, device)\n            cls_data_class, cls_collate_fn, cls_runner_cls = dataset_class, collate_fn, runner_class\n        train_df, val_df, test_df = fill_nan_and_normalize(\n            my_data.feature_df.loc[train_indices], val_data.feature_df.loc[val_indices], test_data.feature_df.loc[test_indices], train_indices, val_indices, test_indices)\n\n        test_dataset = dataset_class(\n            test_data, test_indices, feature_df=test_df)\n        test_loader = DataLoader(dataset=test_dataset,\n                                 batch_size=config['batch_size'],\n                                 shuffle=False,\n                                 num_workers=8,\n                                 pin_memory=True,\n                                 collate_fn=lambda x: cls_collate_fn(x, max_len=model.max_len))\n\n        val_dataset = dataset_class(val_data, val_indices, feature_df=val_df)\n        val_loader = DataLoader(dataset=val_dataset,\n                                batch_size=config['batch_size'],\n                                shuffle=False,\n                                num_workers=8,\n                                pin_memory=True,\n                                collate_fn=lambda x: collate_fn(x, max_len=model.max_len))\n\n        # config['num_workers'],pin_memory=True\n\n    
    train_dataset = dataset_class(\n            my_data, train_indices, feature_df=train_df)\n        train_loader = DataLoader(dataset=train_dataset,\n                                  batch_size=config['batch_size'],\n                                  shuffle=True,\n                                  num_workers=8,\n                                  pin_memory=True,\n                                  collate_fn=lambda x: collate_fn(x, max_len=model.max_len))\n\n        trainer = runner_class(model, train_loader, device, loss_module, optimizer, l2_reg=output_reg,\n                               print_interval=config['print_interval'], console=config['console'])\n\n        val_evaluator = runner_class(model, val_loader, device, loss_module,\n                                     print_interval=config['print_interval'], console=config['console'])\n\n        test_evaluator = runner_class(model, test_loader, device, loss_module,\n                                      print_interval=config['print_interval'], console=config['console'])\n\n        tensorboard_writer = SummaryWriter(config['tensorboard_dir'])\n\n        # initialize with +inf or -inf depending on key metric\n        best_value = 1e16 if config['key_metric'] in NEG_METRICS else -1e16\n        best_test = 1e16 if config['key_metric'] in NEG_METRICS else -1e16\n\n        best_metrics = {}\n        best_test_metrics = {}\n\n        logger.info('Starting training...')\n        stop_count = 0\n        increase_count = 0\n        last_loss = 1e16\n        val_loss = 1e16\n        best_epoch = 0\n        for epoch in range(start_epoch + 1, config[\"epochs\"] + 1):\n            if stop_count == 50 or increase_count == 50:\n                print('model convergent at epoch {}, early stopping'.format(epoch))\n                break\n            epoch_start_time = time.time()\n            # dictionary of aggregate epoch metrics\n            aggr_metrics_train = trainer.train_epoch(epoch)\n            epoch_runtime = 
time.time() - epoch_start_time\n            if epoch % 100 == 0:\n                print(\"epoch : {}\".format(epoch))\n\n            if config['task'] == 'pretrain_and_finetune':\n                aggr_metrics_val, best_metrics, best_value, condition = validate(val_evaluator, tensorboard_writer, config,\n                                                                                 best_metrics, best_value, epoch)\n\n                if condition or epoch == 1:\n                    best_epoch = epoch\n                    best_state_dict = deepcopy(model.state_dict())\n\n            elif config['task'] == 'classification':\n                aggr_metrics_val, best_metrics, best_value, condition = validate(val_evaluator, tensorboard_writer, config,\n                                                                                 best_metrics, best_value, epoch)\n\n                if condition or epoch == 1:\n                    best_epoch = epoch\n                    best_state_dict = deepcopy(model.state_dict())\n                    _, best_test_metrics, best_test, _ = validate(test_evaluator, tensorboard_writer, config,\n                                                                  best_test_metrics, best_test, epoch)\n\n                val_loss = aggr_metrics_val['loss']\n                if abs(last_loss - val_loss) <= 1e-4:\n                    stop_count += 1\n                else:\n                    stop_count = 0\n\n                if val_loss > last_loss:\n                    increase_count += 1\n                else:\n                    increase_count = 0\n\n                last_loss = val_loss\n\n        # save encoder weights\n        if config['task'] == 'classification_transfer':\n            save_path = os.path.join(\n                config['weights_save_path'], config['dataset'])\n            if not os.path.exists(save_path):\n                os.makedirs(save_path)\n\n            for key, val in model.state_dict().items():\n                if 
key.startswith('output_layer'):\n                    state_dict.pop(key)\n            torch.save(state_dict, os.path.join(\n                save_path, 'encoder_weights.pt'))\n\n        if config['task'] == 'pretrain_and_finetune':\n            classifier_optimizer = optim_class(\n                classifier.parameters(), lr=config['lr'], weight_decay=weight_decay)\n            if config['multi_gpu']:\n                classifier_optimizer = nn.DataParallel(\n                    classifier_optimizer, device_ids)\n            finetune_train_dataset = cls_data_class(\n                my_data, train_indices, feature_df=train_df)\n            finetune_train_loader = DataLoader(dataset=finetune_train_dataset,\n                                               batch_size=config['batch_size'],\n                                               shuffle=True,\n                                               num_workers=8,\n                                               collate_fn=lambda x: cls_collate_fn(x, max_len=classifier.max_len))\n            test_dataset = cls_data_class(\n                test_data, test_indices, feature_df=test_df)\n            test_loader = DataLoader(dataset=test_dataset,\n                                     batch_size=config['batch_size'],\n                                     shuffle=False,\n                                     num_workers=8,\n                                     pin_memory=True,\n                                     collate_fn=lambda x: cls_collate_fn(x, max_len=classifier.max_len), drop_last=True)\n\n            val_dataset = cls_data_class(\n                val_data, val_indices, feature_df=val_df)\n            val_loader = DataLoader(dataset=val_dataset,\n                                    batch_size=config['batch_size'],\n                                    shuffle=False,\n                                    num_workers=8,\n                                    pin_memory=True,\n                                    collate_fn=lambda 
x: cls_collate_fn(x, max_len=classifier.max_len), drop_last=True)\n            val_evaluator = cls_runner_cls(classifier, val_loader, device, classification_module,\n                                           print_interval=config['print_interval'], console=config['console'])\n            test_evaluator = cls_runner_cls(classifier, test_loader, device, classification_module,\n                                            print_interval=config['print_interval'], console=config['console'])\n            classifier_trainer = cls_runner_cls(classifier, finetune_train_loader, device, classification_module, classifier_optimizer, l2_reg=output_reg,\n                                                print_interval=config['print_interval'], console=config['console'])\n            state_dict = deepcopy(best_state_dict)\n\n            for key, val in model.state_dict().items():\n                if key.startswith('output_layer'):\n                    state_dict.pop(key)\n\n            #classifier.module.load_state_dict(state_dict, strict=False)\n\n            for epoch in range(start_epoch + 1, 101):\n                epoch_start_time = time.time()\n                aggr_metrics_train = classifier_trainer.train_epoch(\n                    epoch)  # dictionary of aggregate epoch metrics\n                epoch_runtime = time.time() - epoch_start_time\n\n                aggr_metrics_val, best_metrics, best_value, condition = validate(val_evaluator, tensorboard_writer, config,\n                                                                                 best_metrics, best_value, epoch)\n                if condition or epoch == 1:\n                    _, best_test_metrics, best_test, _ = validate(test_evaluator, tensorboard_writer, config,\n                                                                  best_test_metrics, best_test, epoch)\n\n        logger.info('Best {} was {}. 
Other metrics: {}'.format(\n            config['key_metric'], best_value, best_metrics))\n        logger.info('{} fold training Done!'.format(i))\n\n        fold_end_time = time.time()\n        accus.append(best_test_metrics['accuracy'].cpu().numpy())\n        times.append(fold_end_time-fold_start_time)\n        end_epochs.append(best_epoch)\n    # TODO 已经有了所有的metric，参照tsmutil将所有的插入表格并开始训练\n    accus = np.array(accus)\n    acc_mean = accus.mean()\n    acc_std = accus.std()\n    time_mean = np.array(times).mean()\n    epoch_mean = np.array(end_epochs).mean()\n\n    if config['task'] == 'pretrain_and_finetune':\n        save_path = './tst_results.csv'\n        if os.path.exists(save_path):\n            result_form = pd.read_csv(save_path)\n        else:\n            result_form = pd.DataFrame(columns=['target', 'accuracy', 'std'])\n\n        result_form = result_form.append(\n            {'target': config['dataset'], 'accuracy': '%.4f' % acc_mean, 'std': '%.4f' % acc_std}, ignore_index=True)\n        result_form = result_form.iloc[:, -3:]\n        result_form.to_csv(save_path)\n\n    elif config['task'] == 'classification':\n        save_path = './non_linear_classification_tst_results.csv'\n        if os.path.exists(save_path):\n            result_form = pd.read_csv(save_path)\n        else:\n            result_form = pd.DataFrame(columns=[\n                                       'dataset_name', 'test_accuracy', 'test_std', 'train_time', 'end_val_epoch', 'seeds'])\n\n        result_form = result_form.append({'dataset_name': config['dataset'], 'test_accuracy': '%.4f' % acc_mean, 'test_std': '%.4f' % acc_std, 'train_time': '%.4f' % time_mean, 'end_val_epoch': '%.2f' % epoch_mean,\n                                          'seeds': '%d' % 42}, ignore_index=True)\n        result_form = result_form.iloc[:, -6:]\n        result_form.to_csv(save_path)\n\n    return best_value\n\n\nif __name__ == '__main__':\n    # set seed\n    SEED = 42\n    np.random.seed(SEED)\n    
torch.manual_seed(SEED)\n    torch.cuda.manual_seed(SEED)\n    torch.cuda.manual_seed_all(SEED)\n\n    args = Options().parse()  # `argsparse` object\n    config = setup(args)  # configuration dictionary\n    main(config)\n"
  },
  {
    "path": "ts_classification_methods/tst_cls/src/models/__init__.py",
    "content": ""
  },
  {
    "path": "ts_classification_methods/tst_cls/src/models/loss.py",
    "content": "import torch\nimport torch.nn as nn\nfrom torch.nn import functional as F\n\n\ndef get_loss_module(config):\n\n    task = config['task']\n\n    if (task == \"imputation\") or (task == \"transduction\"):\n        return MaskedMSELoss(reduction='none')  # outputs loss for each batch element\n\n    if task == \"classification\":\n        return NoFussCrossEntropyLoss(reduction='none')  # outputs loss for each batch sample\n\n    if task == \"regression\":\n        return nn.MSELoss(reduction='none')  # outputs loss for each batch sample\n\n    else:\n        raise ValueError(\"Loss module for task '{}' does not exist\".format(task))\n\n\ndef l2_reg_loss(model):\n    \"\"\"Returns the squared L2 norm of output layer of given model\"\"\"\n\n    for name, param in model.module.named_parameters():\n        if name == 'output_layer.weight':\n            return torch.sum(torch.square(param))\n\n\nclass NoFussCrossEntropyLoss(nn.CrossEntropyLoss):\n    \"\"\"\n    pytorch's CrossEntropyLoss is fussy: 1) needs Long (int64) targets only, and 2) only 1D.\n    This function satisfies these requirements\n    \"\"\"\n\n    def forward(self, inp, target):\n        return F.cross_entropy(inp, target.long().squeeze(), weight=self.weight,\n                               ignore_index=self.ignore_index, reduction=self.reduction)\n\n\nclass MaskedMSELoss(nn.Module):\n    \"\"\" Masked MSE Loss\n    \"\"\"\n\n    def __init__(self, reduction: str = 'mean'):\n\n        super().__init__()\n\n        self.reduction = reduction\n        self.mse_loss = nn.MSELoss(reduction=self.reduction)\n\n    def forward(self,\n                y_pred: torch.Tensor, y_true: torch.Tensor, mask: torch.BoolTensor) -> torch.Tensor:\n        \"\"\"Compute the loss between a target value and a prediction.\n\n        Args:\n            y_pred: Estimated values\n            y_true: Target values\n            mask: boolean tensor with 0s at places where values should be ignored and 1s where they 
should be considered\n\n        Returns\n        -------\n        if reduction == 'none':\n            (num_active,) Loss for each active batch element as a tensor with gradient attached.\n        if reduction == 'mean':\n            scalar mean loss over batch as a tensor with gradient attached.\n        \"\"\"\n\n        # for this particular loss, one may also elementwise multiply y_pred and y_true with the inverted mask\n        masked_pred = torch.masked_select(y_pred, mask)\n        masked_true = torch.masked_select(y_true, mask)\n\n        return self.mse_loss(masked_pred, masked_true)\n"
  },
  {
    "path": "ts_classification_methods/tst_cls/src/models/ts_transformer.py",
    "content": "from typing import Optional, Any\nimport math\nimport numpy as np\nimport torch\nfrom torch import nn, Tensor\nfrom torch.nn import functional as F\nfrom torch.nn.modules import MultiheadAttention, Linear, Dropout, BatchNorm1d, TransformerEncoderLayer\n\n\ndef model_factory(config, data, labels=None):\n    task = config['task']\n    feat_dim = data.feature_df.shape[1]  # dimensionality of data features\n    # data windowing is used when samples don't have a predefined length or the length is too long\n    max_seq_len = config['data_window_len'] if config['data_window_len'] is not None else config['max_seq_len']\n    if max_seq_len is None:\n        try:\n            max_seq_len = data.max_seq_len\n        except AttributeError as x:\n            print(\"Data class does not define a maximum sequence length, so it must be defined with the script argument `max_seq_len`\")\n            raise x\n    if task == \"pretrain_and_finetune\":\n        if labels is not None:\n            num_labels = len(np.unique(labels))\n            print(\"numlabel is {}\".format(num_labels))\n        encoder = TSTransformerEncoder(feat_dim, max_seq_len, config['d_model'], config['num_heads'],\n                                       config['num_layers'], config['dim_feedforward'], dropout=config['dropout'],\n                                       pos_encoding=config['pos_encoding'], activation=config['activation'],\n                                       norm=config['normalization_layer'], freeze=config['freeze'])\n\n        classifier = TSTransformerEncoderClassiregressor(feat_dim, max_seq_len, config['d_model'],\n                                                         config['num_heads'],\n                                                         config['num_layers'], config['dim_feedforward'],\n                                                         num_classes=num_labels,\n                                                         dropout=config['dropout'], 
pos_encoding=config['pos_encoding'],\n                                                         activation=config['activation'],\n                                                         norm=config['normalization_layer'], freeze=config['freeze'])\n\n        return encoder, classifier\n\n    if (task == \"imputation\") or (task == \"transduction\"):\n        if config['model'] == 'LINEAR':\n            return DummyTSTransformerEncoder(feat_dim, max_seq_len, config['d_model'], config['num_heads'],\n                                             config['num_layers'], config['dim_feedforward'], dropout=config['dropout'],\n                                             pos_encoding=config['pos_encoding'], activation=config['activation'],\n                                             norm=config['normalization_layer'], freeze=config['freeze'])\n        elif config['model'] == 'transformer':\n            return TSTransformerEncoder(feat_dim, max_seq_len, config['d_model'], config['num_heads'],\n                                        config['num_layers'], config['dim_feedforward'], dropout=config['dropout'],\n                                        pos_encoding=config['pos_encoding'], activation=config['activation'],\n                                        norm=config['normalization_layer'], freeze=config['freeze'])\n\n    if (task == \"classification\") or (task == \"regression\"):\n        # dimensionality of labels\n        num_labels = len(\n            data.class_names) if task == \"classification\" else data.labels_df.shape[1]\n        if config['model'] == 'LINEAR':\n            return DummyTSTransformerEncoderClassiregressor(feat_dim, max_seq_len, config['d_model'],\n                                                            config['num_heads'],\n                                                            config['num_layers'], config['dim_feedforward'],\n                                                            num_classes=num_labels,\n                             
                               dropout=config['dropout'], pos_encoding=config['pos_encoding'],\n                                                            activation=config['activation'],\n                                                            norm=config['normalization_layer'], freeze=config['freeze'])\n        elif config['model'] == 'transformer':\n            return TSTransformerEncoderClassiregressor(feat_dim, max_seq_len, config['d_model'],\n                                                       config['num_heads'],\n                                                       config['num_layers'], config['dim_feedforward'],\n                                                       num_classes=num_labels,\n                                                       dropout=config['dropout'], pos_encoding=config['pos_encoding'],\n                                                       activation=config['activation'],\n                                                       norm=config['normalization_layer'], freeze=config['freeze'], nonlinear=config['nonlinear'])\n    else:\n        raise ValueError(\n            \"Model class for task '{}' does not exist\".format(task))\n\n\ndef _get_activation_fn(activation):\n    if activation == \"relu\":\n        return F.relu\n    elif activation == \"gelu\":\n        return F.gelu\n    raise ValueError(\n        \"activation should be relu/gelu, not {}\".format(activation))\n\n\n# From https://github.com/pytorch/examples/blob/master/word_language_model/model.py\nclass FixedPositionalEncoding(nn.Module):\n    r\"\"\"Inject some information about the relative or absolute position of the tokens\n        in the sequence. The positional encodings have the same dimension as\n        the embeddings, so that the two can be summed. Here, we use sine and cosine\n        functions of different frequencies.\n    .. 
math::\n        \\text{PosEncoder}(pos, 2i) = sin(pos/10000^(2i/d_model))\n        \\text{PosEncoder}(pos, 2i+1) = cos(pos/10000^(2i/d_model))\n        \\text{where pos is the word position and i is the embed idx)\n    Args:\n        d_model: the embed dim (required).\n        dropout: the dropout value (default=0.1).\n        max_len: the max. length of the incoming sequence (default=1024).\n    \"\"\"\n\n    def __init__(self, d_model, dropout=0.1, max_len=1024, scale_factor=1.0):\n        super(FixedPositionalEncoding, self).__init__()\n        self.dropout = nn.Dropout(p=dropout)\n\n        pe = torch.zeros(max_len, d_model)  # positional encoding\n        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\n        div_term = torch.exp(torch.arange(\n            0, d_model, 2).float() * (-math.log(10000.0) / d_model))\n        pe[:, 0::2] = torch.sin(position * div_term)\n        pe[:, 1::2] = torch.cos(position * div_term)\n        pe = scale_factor * pe.unsqueeze(0).transpose(0, 1)\n        # this stores the variable in the state_dict (used for non-trainable variables)\n        self.register_buffer('pe', pe)\n\n    def forward(self, x):\n        r\"\"\"Inputs of forward function\n        Args:\n            x: the sequence fed to the positional encoder model (required).\n        Shape:\n            x: [sequence length, batch size, embed dim]\n            output: [sequence length, batch size, embed dim]\n        \"\"\"\n\n        x = x + self.pe[:x.size(0), :]\n        return self.dropout(x)\n\n\nclass LearnablePositionalEncoding(nn.Module):\n\n    def __init__(self, d_model, dropout=0.1, max_len=1024):\n        super(LearnablePositionalEncoding, self).__init__()\n        self.dropout = nn.Dropout(p=dropout)\n        # Each position gets its own embedding\n        # Since indices are always 0 ... 
max_len, we don't have to do a look-up\n        # requires_grad automatically set to True\n        self.pe = nn.Parameter(torch.empty(max_len, 1, d_model))\n        nn.init.uniform_(self.pe, -0.02, 0.02)\n\n    def forward(self, x):\n        r\"\"\"Inputs of forward function\n        Args:\n            x: the sequence fed to the positional encoder model (required).\n        Shape:\n            x: [sequence length, batch size, embed dim]\n            output: [sequence length, batch size, embed dim]\n        \"\"\"\n\n        x = x + self.pe[:x.size(0), :]\n        return self.dropout(x)\n\n\ndef get_pos_encoder(pos_encoding):\n    if pos_encoding == \"learnable\":\n        return LearnablePositionalEncoding\n    elif pos_encoding == \"fixed\":\n        return FixedPositionalEncoding\n\n    raise NotImplementedError(\n        \"pos_encoding should be 'learnable'/'fixed', not '{}'\".format(pos_encoding))\n\n\nclass TransformerBatchNormEncoderLayer(nn.modules.Module):\n    r\"\"\"This transformer encoder layer block is made up of self-attn and feedforward network.\n    It differs from TransformerEncoderLayer in torch/nn/modules/transformer.py in that it replaces LayerNorm\n    with BatchNorm.\n\n    Args:\n        d_model: the number of expected features in the input (required).\n        nhead: the number of heads in the multiheadattention models (required).\n        dim_feedforward: the dimension of the feedforward network model (default=2048).\n        dropout: the dropout value (default=0.1).\n        activation: the activation function of intermediate layer, relu or gelu (default=relu).\n    \"\"\"\n\n    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation=\"relu\"):\n        super(TransformerBatchNormEncoderLayer, self).__init__()\n        self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)\n        # Implementation of Feedforward model\n        self.linear1 = Linear(d_model, dim_feedforward)\n        self.dropout = 
Dropout(dropout)\n        self.linear2 = Linear(dim_feedforward, d_model)\n\n        # normalizes each feature across batch samples and time steps\n        self.norm1 = BatchNorm1d(d_model, eps=1e-5)\n        self.norm2 = BatchNorm1d(d_model, eps=1e-5)\n        self.dropout1 = Dropout(dropout)\n        self.dropout2 = Dropout(dropout)\n\n        self.activation = _get_activation_fn(activation)\n\n    def __setstate__(self, state):\n        if 'activation' not in state:\n            state['activation'] = F.relu\n        super(TransformerBatchNormEncoderLayer, self).__setstate__(state)\n\n    def forward(self, src: Tensor, src_mask: Optional[Tensor] = None,\n                src_key_padding_mask: Optional[Tensor] = None) -> Tensor:\n        r\"\"\"Pass the input through the encoder layer.\n\n        Args:\n            src: the sequence to the encoder layer (required).\n            src_mask: the mask for the src sequence (optional).\n            src_key_padding_mask: the mask for the src keys per batch (optional).\n\n        Shape:\n            see the docs in Transformer class.\n        \"\"\"\n        src2 = self.self_attn(src, src, src, attn_mask=src_mask,\n                              key_padding_mask=src_key_padding_mask)[0]\n        src = src + self.dropout1(src2)  # (seq_len, batch_size, d_model)\n        src = src.permute(1, 2, 0)  # (batch_size, d_model, seq_len)\n        # src = src.reshape([src.shape[0], -1])  # (batch_size, seq_length * d_model)\n        src = self.norm1(src)\n        src = src.permute(2, 0, 1)  # restore (seq_len, batch_size, d_model)\n        src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))\n        src = src + self.dropout2(src2)  # (seq_len, batch_size, d_model)\n        src = src.permute(1, 2, 0)  # (batch_size, d_model, seq_len)\n        src = self.norm2(src)\n        src = src.permute(2, 0, 1)  # restore (seq_len, batch_size, d_model)\n        return src\n\n\nclass TSTransformerEncoder(nn.Module):\n\n    def 
__init__(self, feat_dim, max_len, d_model, n_heads, num_layers, dim_feedforward, dropout=0.1,\n                 pos_encoding='fixed', activation='gelu', norm='BatchNorm', freeze=False):\n        super(TSTransformerEncoder, self).__init__()\n\n        self.max_len = max_len\n        self.d_model = d_model\n        self.n_heads = n_heads\n\n        self.project_inp = nn.Linear(feat_dim, d_model)\n        self.pos_enc = get_pos_encoder(pos_encoding)(\n            d_model, dropout=dropout*(1.0 - freeze), max_len=max_len)\n\n        if norm == 'LayerNorm':\n            encoder_layer = TransformerEncoderLayer(\n                d_model, self.n_heads, dim_feedforward, dropout*(1.0 - freeze), activation=activation)\n        else:\n            encoder_layer = TransformerBatchNormEncoderLayer(\n                d_model, self.n_heads, dim_feedforward, dropout*(1.0 - freeze), activation=activation)\n\n        self.transformer_encoder = nn.TransformerEncoder(\n            encoder_layer, num_layers)\n\n        self.output_layer = nn.Linear(d_model, feat_dim)\n\n        self.act = _get_activation_fn(activation)\n\n        self.dropout1 = nn.Dropout(dropout)\n\n        self.feat_dim = feat_dim\n\n    def forward(self, X, padding_masks):\n        \"\"\"\n        Args:\n            X: (batch_size, seq_length, feat_dim) torch tensor of masked features (input)\n            padding_masks: (batch_size, seq_length) boolean tensor, 1 means keep vector at this position, 0 means padding\n        Returns:\n            output: (batch_size, seq_length, feat_dim)\n        \"\"\"\n\n        # permute because pytorch convention for transformers is [seq_length, batch_size, feat_dim]. 
padding_masks [batch_size, feat_dim]\n        inp = X.permute(1, 0, 2)\n        inp = self.project_inp(inp) * math.sqrt(\n            self.d_model)  # [seq_length, batch_size, d_model] project input vectors to d_model dimensional space\n        inp = self.pos_enc(inp)  # add positional encoding\n        # NOTE: logic for padding masks is reversed to comply with definition in MultiHeadAttention, TransformerEncoderLayer\n        # (seq_length, batch_size, d_model)\n        output = self.transformer_encoder(\n            inp, src_key_padding_mask=~padding_masks)\n        # the output transformer encoder/decoder embeddings don't include non-linearity\n        output = self.act(output)\n        output = output.permute(1, 0, 2)  # (batch_size, seq_length, d_model)\n        output = self.dropout1(output)\n        # Most probably defining a Linear(d_model,feat_dim) vectorizes the operation over (seq_length, batch_size).\n        # (batch_size, seq_length, feat_dim)\n        output = self.output_layer(output)\n\n        return output\n\n\nclass TSTransformerEncoderClassiregressor(nn.Module):\n    \"\"\"\n    Simplest classifier/regressor. Can be either regressor or classifier because the output does not include\n    softmax. 
Concatenates final layer embeddings and uses 0s to ignore padding embeddings in final output layer.\n    \"\"\"\n\n    def __init__(self, feat_dim, max_len, d_model, n_heads, num_layers, dim_feedforward, num_classes,\n                 dropout=0.1, pos_encoding='fixed', activation='gelu', norm='BatchNorm', freeze=False, nonlinear=True):\n        super(TSTransformerEncoderClassiregressor, self).__init__()\n\n        self.max_len = max_len\n        self.d_model = d_model\n        self.n_heads = n_heads\n\n        self.project_inp = nn.Linear(feat_dim, d_model)\n        self.pos_enc = get_pos_encoder(pos_encoding)(\n            d_model, dropout=dropout*(1.0 - freeze), max_len=max_len)\n\n        if norm == 'LayerNorm':\n            encoder_layer = TransformerEncoderLayer(\n                d_model, self.n_heads, dim_feedforward, dropout*(1.0 - freeze), activation=activation)\n        else:\n            encoder_layer = TransformerBatchNormEncoderLayer(\n                d_model, self.n_heads, dim_feedforward, dropout*(1.0 - freeze), activation=activation)\n\n        self.transformer_encoder = nn.TransformerEncoder(\n            encoder_layer, num_layers)\n\n        self.act = _get_activation_fn(activation)\n\n        self.dropout1 = nn.Dropout(dropout)\n\n        self.feat_dim = feat_dim\n        self.num_classes = num_classes\n        self.output_layer = self.build_output_module(\n            d_model, max_len, num_classes)\n\n    def build_output_module(self, d_model, max_len, num_classes, nonlinear=False):\n        if nonlinear:\n            net = nn.Sequential(\n                nn.Linear(d_model * max_len, d_model * max_len),\n                nn.BatchNorm1d(d_model * max_len),\n                nn.ReLU(),\n                nn.Dropout(0.2),\n                nn.Linear(d_model * max_len, num_classes),\n                nn.Softmax(dim=1)\n            )\n            return net\n        else:\n            output_layer = nn.Linear(d_model * max_len, num_classes)\n\n        # no 
softmax (or log softmax), because CrossEntropyLoss does this internally. If probabilities are needed,\n        # add F.log_softmax and use NLLoss\n            return output_layer\n\n    def forward(self, X, padding_masks):\n        \"\"\"\n        Args:\n            X: (batch_size, seq_length, feat_dim) torch tensor of masked features (input)\n            padding_masks: (batch_size, seq_length) boolean tensor, 1 means keep vector at this position, 0 means padding\n        Returns:\n            output: (batch_size, num_classes)\n        \"\"\"\n\n        # permute because pytorch convention for transformers is [seq_length, batch_size, feat_dim]. padding_masks [batch_size, feat_dim]\n        inp = X.permute(1, 0, 2)\n        inp = self.project_inp(inp) * math.sqrt(\n            self.d_model)  # [seq_length, batch_size, d_model] project input vectors to d_model dimensional space\n        inp = self.pos_enc(inp)  # add positional encoding\n        # NOTE: logic for padding masks is reversed to comply with definition in MultiHeadAttention, TransformerEncoderLayer\n        # (seq_length, batch_size, d_model)\n        output = self.transformer_encoder(\n            inp, src_key_padding_mask=~padding_masks)\n        # the output transformer encoder/decoder embeddings don't include non-linearity\n        output = self.act(output)\n        output = output.permute(1, 0, 2)  # (batch_size, seq_length, d_model)\n        output = self.dropout1(output)\n\n        # Output\n        # zero-out padding embeddings\n        output = output * padding_masks.unsqueeze(-1)\n        # (batch_size, seq_length * d_model)\n        output = output.reshape(output.shape[0], -1)\n        output = self.output_layer(output)  # (batch_size, num_classes)\n\n        return output\n"
  },
  {
    "path": "ts_classification_methods/tst_cls/src/optimizers.py",
    "content": "import math\nimport torch\nfrom torch.optim.optimizer import Optimizer\n\n\ndef get_optimizer(name):\n\n    if name == \"Adam\":\n        return torch.optim.Adam\n    elif name == \"RAdam\":\n        return RAdam\n\n\n# from https://github.com/LiyuanLucasLiu/RAdam/blob/master/radam/radam.py\nclass RAdam(Optimizer):\n\n    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, degenerated_to_sgd=True):\n        if not 0.0 <= lr:\n            raise ValueError(\"Invalid learning rate: {}\".format(lr))\n        if not 0.0 <= eps:\n            raise ValueError(\"Invalid epsilon value: {}\".format(eps))\n        if not 0.0 <= betas[0] < 1.0:\n            raise ValueError(\"Invalid beta parameter at index 0: {}\".format(betas[0]))\n        if not 0.0 <= betas[1] < 1.0:\n            raise ValueError(\"Invalid beta parameter at index 1: {}\".format(betas[1]))\n\n        self.degenerated_to_sgd = degenerated_to_sgd\n        if isinstance(params, (list, tuple)) and len(params) > 0 and isinstance(params[0], dict):\n            for param in params:\n                if 'betas' in param and (param['betas'][0] != betas[0] or param['betas'][1] != betas[1]):\n                    param['buffer'] = [[None, None, None] for _ in range(10)]\n        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay,\n                        buffer=[[None, None, None] for _ in range(10)])\n        super(RAdam, self).__init__(params, defaults)\n\n    def __setstate__(self, state):\n        super(RAdam, self).__setstate__(state)\n\n    def step(self, closure=None):\n\n        loss = None\n        if closure is not None:\n            loss = closure()\n\n        for group in self.param_groups:\n\n            for p in group['params']:\n                if p.grad is None:\n                    continue\n                grad = p.grad.data.float()\n                if grad.is_sparse:\n                    raise RuntimeError('RAdam does not support sparse 
gradients')\n\n                p_data_fp32 = p.data.float()\n\n                state = self.state[p]\n\n                if len(state) == 0:\n                    state['step'] = 0\n                    state['exp_avg'] = torch.zeros_like(p_data_fp32)\n                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)\n                else:\n                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)\n                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)\n\n                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n                beta1, beta2 = group['betas']\n\n                exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n                exp_avg.mul_(beta1).add_(1 - beta1, grad)\n\n                state['step'] += 1\n                buffered = group['buffer'][int(state['step'] % 10)]\n                if state['step'] == buffered[0]:\n                    N_sma, step_size = buffered[1], buffered[2]\n                else:\n                    buffered[0] = state['step']\n                    beta2_t = beta2 ** state['step']\n                    N_sma_max = 2 / (1 - beta2) - 1\n                    N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)\n                    buffered[1] = N_sma\n\n                    # more conservative since it's an approximated value\n                    if N_sma >= 5:\n                        step_size = math.sqrt(\n                            (1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (\n                                        N_sma_max - 2)) / (1 - beta1 ** state['step'])\n                    elif self.degenerated_to_sgd:\n                        step_size = 1.0 / (1 - beta1 ** state['step'])\n                    else:\n                        step_size = -1\n                    buffered[2] = step_size\n\n                # more conservative since it's an approximated value\n                if N_sma >= 5:\n      
              if group['weight_decay'] != 0:\n                        p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)\n                    denom = exp_avg_sq.sqrt().add_(group['eps'])\n                    p_data_fp32.addcdiv_(-step_size * group['lr'], exp_avg, denom)\n                    p.data.copy_(p_data_fp32)\n                elif step_size > 0:\n                    if group['weight_decay'] != 0:\n                        p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)\n                    p_data_fp32.add_(-step_size * group['lr'], exp_avg)\n                    p.data.copy_(p_data_fp32)\n\n        return loss\n\n\nclass PlainRAdam(Optimizer):\n\n    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, degenerated_to_sgd=True):\n        if not 0.0 <= lr:\n            raise ValueError(\"Invalid learning rate: {}\".format(lr))\n        if not 0.0 <= eps:\n            raise ValueError(\"Invalid epsilon value: {}\".format(eps))\n        if not 0.0 <= betas[0] < 1.0:\n            raise ValueError(\"Invalid beta parameter at index 0: {}\".format(betas[0]))\n        if not 0.0 <= betas[1] < 1.0:\n            raise ValueError(\"Invalid beta parameter at index 1: {}\".format(betas[1]))\n\n        self.degenerated_to_sgd = degenerated_to_sgd\n        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)\n\n        super(PlainRAdam, self).__init__(params, defaults)\n\n    def __setstate__(self, state):\n        super(PlainRAdam, self).__setstate__(state)\n\n    def step(self, closure=None):\n\n        loss = None\n        if closure is not None:\n            loss = closure()\n\n        for group in self.param_groups:\n\n            for p in group['params']:\n                if p.grad is None:\n                    continue\n                grad = p.grad.data.float()\n                if grad.is_sparse:\n                    raise RuntimeError('RAdam does not support sparse gradients')\n\n    
            p_data_fp32 = p.data.float()\n\n                state = self.state[p]\n\n                if len(state) == 0:\n                    state['step'] = 0\n                    state['exp_avg'] = torch.zeros_like(p_data_fp32)\n                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)\n                else:\n                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)\n                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)\n\n                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n                beta1, beta2 = group['betas']\n\n                exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n                exp_avg.mul_(beta1).add_(1 - beta1, grad)\n\n                state['step'] += 1\n                beta2_t = beta2 ** state['step']\n                N_sma_max = 2 / (1 - beta2) - 1\n                N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)\n\n                # more conservative since it's an approximated value\n                if N_sma >= 5:\n                    if group['weight_decay'] != 0:\n                        p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)\n                    step_size = group['lr'] * math.sqrt(\n                        (1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (\n                                    N_sma_max - 2)) / (1 - beta1 ** state['step'])\n                    denom = exp_avg_sq.sqrt().add_(group['eps'])\n                    p_data_fp32.addcdiv_(-step_size, exp_avg, denom)\n                    p.data.copy_(p_data_fp32)\n                elif self.degenerated_to_sgd:\n                    if group['weight_decay'] != 0:\n                        p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)\n                    step_size = group['lr'] / (1 - beta1 ** state['step'])\n                    p_data_fp32.add_(-step_size, exp_avg)\n                    
p.data.copy_(p_data_fp32)\n\n        return loss\n\n\nclass AdamW(Optimizer):\n\n    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, warmup=0):\n        if not 0.0 <= lr:\n            raise ValueError(\"Invalid learning rate: {}\".format(lr))\n        if not 0.0 <= eps:\n            raise ValueError(\"Invalid epsilon value: {}\".format(eps))\n        if not 0.0 <= betas[0] < 1.0:\n            raise ValueError(\"Invalid beta parameter at index 0: {}\".format(betas[0]))\n        if not 0.0 <= betas[1] < 1.0:\n            raise ValueError(\"Invalid beta parameter at index 1: {}\".format(betas[1]))\n\n        defaults = dict(lr=lr, betas=betas, eps=eps,\n                        weight_decay=weight_decay, warmup=warmup)\n        super(AdamW, self).__init__(params, defaults)\n\n    def __setstate__(self, state):\n        super(AdamW, self).__setstate__(state)\n\n    def step(self, closure=None):\n        loss = None\n        if closure is not None:\n            loss = closure()\n\n        for group in self.param_groups:\n\n            for p in group['params']:\n                if p.grad is None:\n                    continue\n                grad = p.grad.data.float()\n                if grad.is_sparse:\n                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')\n\n                p_data_fp32 = p.data.float()\n\n                state = self.state[p]\n\n                if len(state) == 0:\n                    state['step'] = 0\n                    state['exp_avg'] = torch.zeros_like(p_data_fp32)\n                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)\n                else:\n                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)\n                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)\n\n                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n                beta1, beta2 = group['betas']\n\n       
         state['step'] += 1\n\n                exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n                exp_avg.mul_(beta1).add_(1 - beta1, grad)\n\n                denom = exp_avg_sq.sqrt().add_(group['eps'])\n                bias_correction1 = 1 - beta1 ** state['step']\n                bias_correction2 = 1 - beta2 ** state['step']\n\n                if group['warmup'] > state['step']:\n                    scheduled_lr = 1e-8 + state['step'] * group['lr'] / group['warmup']\n                else:\n                    scheduled_lr = group['lr']\n\n                step_size = scheduled_lr * math.sqrt(bias_correction2) / bias_correction1\n\n                if group['weight_decay'] != 0:\n                    p_data_fp32.add_(-group['weight_decay'] * scheduled_lr, p_data_fp32)\n\n                p_data_fp32.addcdiv_(-step_size, exp_avg, denom)\n\n                p.data.copy_(p_data_fp32)\n\n        return loss\n"
  },
  {
    "path": "ts_classification_methods/tst_cls/src/options.py",
    "content": "import argparse\n\n\nclass Options(object):\n\n    def __init__(self):\n\n        # Handle command line arguments\n        self.parser = argparse.ArgumentParser(\n            description='Run a complete training pipeline. Optionally, a JSON configuration file can be used, to overwrite command-line arguments.')\n\n        ## Run from config file\n        self.parser.add_argument('--config', dest='config_filepath',\n                                 help='Configuration .json file (optional). Overwrites existing command-line args!')\n\n        ## Run from command-line arguments\n        # I/O\n        self.parser.add_argument('--output_dir', default='./results',\n                                 help='Root output directory. Must exist. Time-stamped directories will be created inside.')\n        self.parser.add_argument('--data_dir', default='./data',\n                                 help='Data directory')\n        self.parser.add_argument('--load_model',\n                                 help='Path to pre-trained model.')\n        self.parser.add_argument('--resume', action='store_true',\n                                 help='If set, will load `starting_epoch` and state of optimizer, besides model weights.')\n        self.parser.add_argument('--change_output', action='store_true',\n                                 help='Whether the loaded model will be fine-tuned on a different task (necessitating a different output layer)')\n        self.parser.add_argument('--save_all', action='store_true',\n                                 help='If set, will save model weights (and optimizer state) for every epoch; otherwise just latest')\n        self.parser.add_argument('--name', dest='experiment_name', default='',\n                                 help='A string identifier/name for the experiment to be run - it will be appended to the output directory name, before the timestamp')\n        self.parser.add_argument('--comment', type=str, default='', help='A 
comment/description of the experiment')\n        self.parser.add_argument('--no_timestamp', action='store_true',\n                                 help='If set, a timestamp will not be appended to the output directory name')\n        self.parser.add_argument('--records_file', default='./records.xls',\n                                 help='Excel file keeping all records of experiments')\n        # System\n        self.parser.add_argument('--console', action='store_true',\n                                 help=\"Optimize printout for console output; otherwise for file\")\n        self.parser.add_argument('--print_interval', type=int, default=100,\n                                 help='Print batch info every this many batches')\n        self.parser.add_argument('--gpu', type=str, default='0',\n                                 help='GPU index, -1 for CPU')\n        self.parser.add_argument('--n_proc', type=int, default=-1,\n                                 help='Number of processes for data loading/preprocessing. By default, equals num. of available cores.')\n        self.parser.add_argument('--num_workers', type=int, default=5,\n                                 help='dataloader threads. 0 for single-thread.')\n        self.parser.add_argument('--seed',\n                                 help='Seed used for splitting sets. None by default, set to an integer for reproducibility')\n        # Dataset\n        self.parser.add_argument('--limit_size', type=float, default=None,\n                                 help=\"Limit  dataset to specified smaller random sample, e.g. for rapid debugging purposes. 
\"\n                                      \"If in [0,1], it will be interpreted as a proportion of the dataset, \"\n                                      \"otherwise as an integer absolute number of samples\")\n        self.parser.add_argument('--test_only', choices={'testset', 'fold_transduction'},\n                                 help='If set, no training will take place; instead, trained model will be loaded and evaluated on test set')\n        self.parser.add_argument('--data_class', type=str, default='tsra',\n                                 help=\"Which type of data should be processed.\")\n        self.parser.add_argument('--labels', type=str,\n                                 help=\"In case a dataset contains several labels (multi-task), \"\n                                      \"which type of labels should be used in regression or classification, i.e. name of column(s).\")\n        self.parser.add_argument('--test_from',\n                                 help='If given, will read test IDs from specified text file containing sample IDs one in each row')\n        self.parser.add_argument('--test_ratio', type=float, default=0,\n                                 help=\"Set aside this proportion of the dataset as a test set\")\n        self.parser.add_argument('--val_ratio', type=float, default=0.2,\n                                 help=\"Proportion of the dataset to be used as a validation set\")\n        self.parser.add_argument('--pattern', type=str,\n                                 help='Regex pattern used to select files contained in `data_dir`. 
If None, all data will be used.')\n        self.parser.add_argument('--val_pattern', type=str,\n                                 help=\"\"\"Regex pattern used to select files contained in `data_dir` exclusively for the validation set.\n                            If None, a positive `val_ratio` will be used to reserve part of the common data set.\"\"\")\n        self.parser.add_argument('--test_pattern', type=str,\n                                 help=\"\"\"Regex pattern used to select files contained in `data_dir` exclusively for the test set.\n                            If None, `test_ratio`, if specified, will be used to reserve part of the common data set.\"\"\")\n        self.parser.add_argument('--normalization',\n                                 choices={'standardization', 'minmax', 'per_sample_std', 'per_sample_minmax'},\n                                 default='standardization',\n                                 help='If specified, will apply normalization on the input features of a dataset.')\n        self.parser.add_argument('--norm_from',\n                                 help=\"\"\"If given, will read normalization values (e.g. 
mean, std, min, max) from specified pickle file.\n                            The columns correspond to features, rows correspond to mean, std or min, max.\"\"\")\n        self.parser.add_argument('--subsample_factor', type=int,\n                                 help='Sub-sampling factor used for long sequences: keep every kth sample')\n        # Training process\n        self.parser.add_argument('--task', choices={\"imputation\", \"transduction\", \"classification\", \"regression\", \"pretrain_and_finetune\"},\n                                 default=\"imputation\",\n                                 help=(\"Training objective/task: imputation of masked values,\\n\"\n                                       \"                          transduction of features to other features,\\n\"\n                                       \"                          classification of entire time series,\\n\"\n                                       \"                          regression of scalar(s) for entire time series\"))\n        self.parser.add_argument('--masking_ratio', type=float, default=0.15,\n                                 help='Imputation: mask this proportion of each variable')\n        self.parser.add_argument('--mean_mask_length', type=float, default=3,\n                                 help=\"Imputation: the desired mean length of masked segments. 
Used only when `mask_distribution` is 'geometric'.\")\n        self.parser.add_argument('--mask_mode', choices={'separate', 'concurrent'}, default='separate',\n                                 help=(\"Imputation: whether each variable should be masked separately \"\n                                       \"or all variables at a certain positions should be masked concurrently\"))\n        self.parser.add_argument('--mask_distribution', choices={'geometric', 'bernoulli'}, default='geometric',\n                                 help=(\"Imputation: whether each mask sequence element is sampled independently at random\"\n                                       \"or whether sampling follows a markov chain (stateful), resulting in \"\n                                       \"geometric distributions of masked squences of a desired mean_mask_length\"))\n        self.parser.add_argument('--exclude_feats', type=str, default=None,\n                                 help='Imputation: Comma separated string of indices corresponding to features to be excluded from masking')\n        self.parser.add_argument('--mask_feats', type=str, default='0, 1',\n                                 help='Transduction: Comma separated string of indices corresponding to features to be masked')\n        self.parser.add_argument('--start_hint', type=float, default=0.0,\n                                 help='Transduction: proportion at the beginning of time series which will not be masked')\n        self.parser.add_argument('--end_hint', type=float, default=0.0,\n                                 help='Transduction: proportion at the end of time series which will not be masked')\n        self.parser.add_argument('--harden', action='store_true',\n                                 help='Makes training objective progressively harder, by masking more of the input')\n\n        self.parser.add_argument('--epochs', type=int, default=400,\n                                 help='Number of training epochs')\n       
 self.parser.add_argument('--val_interval', type=int, default=1,\n                                 help='Evaluate on validation set every this many epochs. Must be >= 1.')\n        self.parser.add_argument('--optimizer', choices={\"Adam\", \"RAdam\"}, default=\"RAdam\", help=\"Optimizer\")\n        self.parser.add_argument('--lr', type=float, default=1e-3,\n                                 help='learning rate (default holds for batch size 64)')\n        self.parser.add_argument('--lr_step', type=str, default='1000000',\n                                 help='Comma separated string of epochs when to reduce learning rate by a factor of 10.'\n                                      ' The default is a large value, meaning that the learning rate will not change.')\n        self.parser.add_argument('--lr_factor', type=str, default='0.1',\n                                 help=(\"Comma separated string of multiplicative factors to be applied to lr \"\n                                       \"at corresponding steps specified in `lr_step`. 
If a single value is provided, \"\n                                       \"it will be replicated to match the number of steps in `lr_step`.\"))\n        self.parser.add_argument('--batch_size', type=int, default=64,\n                                 help='Training batch size')\n        self.parser.add_argument('--l2_reg', type=float, default=0,\n                                 help='L2 weight regularization parameter')\n        self.parser.add_argument('--global_reg', action='store_true',\n                                 help='If set, L2 regularization will be applied to all weights instead of only the output layer')\n        self.parser.add_argument('--key_metric', choices={'loss', 'accuracy', 'precision'}, default='loss',\n                                 help='Metric used for defining best epoch')\n        self.parser.add_argument('--freeze', action='store_true',\n                                 help='If set, freezes all layer parameters except for the output layer. Also removes dropout except before the output layer')\n\n        # Model\n        self.parser.add_argument('--model', choices={\"transformer\", \"LINEAR\"}, default=\"transformer\",\n                                 help=\"Model class\")\n        self.parser.add_argument('--max_seq_len', type=int,\n                                 help=\"\"\"Maximum input sequence length. Determines size of transformer layers.\n                                 If not provided, then the value defined inside the data class will be used.\"\"\")\n        self.parser.add_argument('--data_window_len', type=int,\n                                 help=\"\"\"Used instead of the `max_seq_len`, when the data samples must be\n                                 segmented into windows. 
Determines maximum input sequence length \n                                 (size of transformer layers).\"\"\")\n        self.parser.add_argument('--d_model', type=int, default=64,\n                                 help='Internal dimension of transformer embeddings')\n        self.parser.add_argument('--dim_feedforward', type=int, default=256,\n                                 help='Dimension of dense feedforward part of transformer layer')\n        self.parser.add_argument('--num_heads', type=int, default=8,\n                                 help='Number of multi-headed attention heads')\n        self.parser.add_argument('--num_layers', type=int, default=3,\n                                 help='Number of transformer encoder layers (blocks)')\n        self.parser.add_argument('--dropout', type=float, default=0.1,\n                                 help='Dropout applied to most transformer encoder layers')\n        self.parser.add_argument('--pos_encoding', choices={'fixed', 'learnable'}, default='learnable',\n                                 help='Internal dimension of transformer embeddings')\n        self.parser.add_argument('--activation', choices={'relu', 'gelu'}, default='gelu',\n                                 help='Activation to be used in transformer encoder')\n        self.parser.add_argument('--normalization_layer', choices={'BatchNorm', 'LayerNorm'}, default='BatchNorm',\n                                 help='Normalization layer to be used internally in transformer encoder')\n\n        # my arg, for k-fold\n        self.parser.add_argument('--ith', type=int, help='the ith training scripts')\n        self.parser.add_argument('--dataset', type=str, help='target dataset name')\n\n        # transfer learning\n        self.parser.add_argument('--weights_save_path', type=str, help='encoder weights saving path')\n        self.parser.add_argument('--load_root', default=None, type=str, help='load root')\n        self.parser.add_argument('--source_dataset', 
type=str, help='transfer source dataset')\n        self.parser.add_argument('--multi_gpu', type=str)\n\n        # for classification \n        self.parser.add_argument('--nonlinear', type=bool, default=True, help='use linear or non-linear classifier')\n\n    def parse(self):\n\n        args = self.parser.parse_args()\n\n        args.lr_step = [int(i) for i in args.lr_step.split(',')]\n        args.lr_factor = [float(i) for i in args.lr_factor.split(',')]\n        if (len(args.lr_step) > 1) and (len(args.lr_factor) == 1):\n            args.lr_factor = len(args.lr_step) * args.lr_factor  # replicate\n        assert len(args.lr_step) == len(\n            args.lr_factor), \"You must specify as many values in `lr_step` as in `lr_factors`\"\n\n        if args.exclude_feats is not None:\n            args.exclude_feats = [int(i) for i in args.exclude_feats.split(',')]\n        args.mask_feats = [int(i) for i in args.mask_feats.split(',')]\n\n        if args.val_pattern is not None:\n            args.val_ratio = 0\n            args.test_ratio = 0\n\n        return args\n"
  },
  {
    "path": "ts_classification_methods/tst_cls/src/running.py",
    "content": "import logging\nimport sys\nimport os\nimport traceback\nimport json\nfrom datetime import datetime\nimport string\nimport random\nfrom collections import OrderedDict\nimport time\nimport pickle\nfrom functools import partial\n\nimport ipdb\nimport torch\nfrom torch.utils.data import DataLoader\nimport numpy as np\nimport sklearn\n\nfrom utils import utils, analysis\nfrom models.loss import l2_reg_loss\nfrom datasets.dataset import ImputationDataset, TransductionDataset, ClassiregressionDataset, collate_unsuperv, collate_superv\n\n\nlogger = logging.getLogger('__main__')\n\nNEG_METRICS = {'loss'}  # metrics for which \"better\" is less\n\nval_times = {\"total_time\": 0, \"count\": 0}\n\n\ndef pipeline_factory(config, device):\n    \"\"\"For the task specified in the configuration returns the corresponding combination of\n    Dataset class, collate function and Runner class.\"\"\"\n\n    task = config['task']\n    if task == \"pretrain_and_finetune\":\n        return partial(ImputationDataset, mean_mask_length=config['mean_mask_length'],\n                       masking_ratio=config['masking_ratio'], mode=config['mask_mode'],\n                       distribution=config['mask_distribution'], exclude_feats=config['exclude_feats'], device=device), collate_unsuperv, UnsupervisedRunner, partial(ClassiregressionDataset,device=device), collate_superv, SupervisedRunner\n\n    if task == \"imputation\":\n        return partial(ImputationDataset, mean_mask_length=config['mean_mask_length'],\n                       masking_ratio=config['masking_ratio'], mode=config['mask_mode'],\n                       distribution=config['mask_distribution'], exclude_feats=config['exclude_feats'], device=device),\\\n                        collate_unsuperv, UnsupervisedRunner\n    if task == \"transduction\":\n        return partial(TransductionDataset, mask_feats=config['mask_feats'],\n                       start_hint=config['start_hint'], end_hint=config['end_hint']), 
collate_unsuperv, UnsupervisedRunner\n    if (task == \"classification\") or (task == \"regression\"):\n        return partial(ClassiregressionDataset,device=device), collate_superv, SupervisedRunner\n    else:\n        raise NotImplementedError(\"Task '{}' not implemented\".format(task))\n\n\ndef setup(args):\n    \"\"\"Prepare training session: read configuration from file (takes precedence), create directories.\n    Input:\n        args: arguments object from argparse\n    Returns:\n        config: configuration dictionary\n    \"\"\"\n\n    config = args.__dict__  # configuration dictionary\n\n    if args.config_filepath is not None:\n        logger.info(\"Reading configuration ...\")\n        try:  # dictionary containing the entire configuration settings in a hierarchical fashion\n            config.update(utils.load_config(args.config_filepath))\n        except:\n            logger.critical(\"Failed to load configuration file. Check JSON syntax and verify that files exist\")\n            traceback.print_exc()\n            sys.exit(1)\n\n    # Create output directory\n    initial_timestamp = datetime.now()\n    output_dir = config['output_dir']\n    if not os.path.isdir(output_dir):\n        raise IOError(\n            \"Root directory '{}', where the directory of the experiment will be created, must exist\".format(output_dir))\n\n    output_dir = os.path.join(output_dir, config['experiment_name'])\n\n    formatted_timestamp = initial_timestamp.strftime(\"%Y-%m-%d_%H-%M-%S\")\n    config['initial_timestamp'] = formatted_timestamp\n    if (not config['no_timestamp']) or (len(config['experiment_name']) == 0):\n        rand_suffix = \"\".join(random.choices(string.ascii_letters + string.digits, k=3))\n        output_dir += \"_\" + formatted_timestamp + \"_\" + rand_suffix\n    config['output_dir'] = output_dir\n    config['save_dir'] = os.path.join(output_dir, 'checkpoints')\n    config['pred_dir'] = os.path.join(output_dir, 'predictions')\n    
config['tensorboard_dir'] = os.path.join(output_dir, 'tb_summaries')\n    utils.create_dirs([config['save_dir'], config['pred_dir'], config['tensorboard_dir']])\n\n    # Save configuration as a (pretty) json file\n    with open(os.path.join(output_dir, 'configuration.json'), 'w') as fp:\n        json.dump(config, fp, indent=4, sort_keys=True)\n\n    logger.info(\"Stored configuration file in '{}'\".format(output_dir))\n\n    return config\n\n\ndef fold_evaluate(dataset, model, device, loss_module, target_feats, config, dataset_name):\n\n    allfolds = {'target_feats': target_feats,  # list of len(num_folds), each element: list of target feature integer indices\n                'predictions': [],  # list of len(num_folds), each element: (num_samples, seq_len, feat_dim) prediction per sample\n                'targets': [],  # list of len(num_folds), each element: (num_samples, seq_len, feat_dim) target/original input per sample\n                'target_masks': [],  # list of len(num_folds), each element: (num_samples, seq_len, feat_dim) boolean mask per sample\n                'metrics': [],  # list of len(num_folds), each element: (num_samples, num_metrics) metric per sample\n                'IDs': []}  # list of len(num_folds), each element: (num_samples,) ID per sample\n\n    for i, tgt_feats in enumerate(target_feats):\n\n        dataset.mask_feats = tgt_feats  # set the transduction target features\n\n        loader = DataLoader(dataset=dataset,\n                            batch_size=config['batch_size'],\n                            shuffle=False,\n                            num_workers=config['num_workers'],\n                            pin_memory=True,\n                            collate_fn=lambda x: collate_unsuperv(x, max_len=config['max_seq_len']))\n\n        evaluator = UnsupervisedRunner(model, loader, device, loss_module,\n                                       print_interval=config['print_interval'], console=config['console'])\n\n        
logger.info(\"Evaluating {} set, fold: {}, target features: {}\".format(dataset_name, i, tgt_feats))\n        aggr_metrics, per_batch = evaluate(evaluator)\n\n        metrics_array = convert_metrics_per_batch_to_per_sample(per_batch['metrics'], per_batch['target_masks'])\n        metrics_array = np.concatenate(metrics_array, axis=0)\n        allfolds['metrics'].append(metrics_array)\n        allfolds['predictions'].append(np.concatenate(per_batch['predictions'], axis=0))\n        allfolds['targets'].append(np.concatenate(per_batch['targets'], axis=0))\n        allfolds['target_masks'].append(np.concatenate(per_batch['target_masks'], axis=0))\n        allfolds['IDs'].append(np.concatenate(per_batch['IDs'], axis=0))\n\n        metrics_mean = np.mean(metrics_array, axis=0)\n        metrics_std = np.std(metrics_array, axis=0)\n        for m, metric_name in enumerate(list(aggr_metrics.items())[1:]):\n            logger.info(\"{}:: Mean: {:.3f}, std: {:.3f}\".format(metric_name, metrics_mean[m], metrics_std[m]))\n\n    pred_filepath = os.path.join(config['pred_dir'], dataset_name + '_fold_transduction_predictions.pickle')\n    logger.info(\"Serializing predictions into {} ... 
\".format(pred_filepath))\n    with open(pred_filepath, 'wb') as f:\n        pickle.dump(allfolds, f, pickle.HIGHEST_PROTOCOL)\n\n\ndef convert_metrics_per_batch_to_per_sample(metrics, target_masks):\n    \"\"\"\n    Args:\n        metrics: list of len(num_batches), each element: list of len(num_metrics), each element: (num_active_in_batch,) metric per element\n        target_masks: list of len(num_batches), each element: (batch_size, seq_len, feat_dim) boolean mask: 1s active, 0s ignore\n    Returns:\n        metrics_array = list of len(num_batches), each element: (batch_size, num_metrics) metric per sample\n    \"\"\"\n    metrics_array = []\n    for b, batch_target_masks in enumerate(target_masks):\n        num_active_per_sample = np.sum(batch_target_masks, axis=(1, 2))\n        batch_metrics = np.stack(metrics[b], axis=1)  # (num_active_in_batch, num_metrics)\n        ind = 0\n        metrics_per_sample = np.zeros((len(num_active_per_sample), batch_metrics.shape[1]))  # (batch_size, num_metrics)\n        for n, num_active in enumerate(num_active_per_sample):\n            new_ind = ind + num_active\n            metrics_per_sample[n, :] = np.sum(batch_metrics[ind:new_ind, :], axis=0)\n            ind = new_ind\n        metrics_array.append(metrics_per_sample)\n    return metrics_array\n\n\ndef evaluate(evaluator):\n    \"\"\"Perform a single, one-off evaluation on an evaluator object (initialized with a dataset)\"\"\"\n\n    eval_start_time = time.time()\n    with torch.no_grad():\n        aggr_metrics, per_batch = evaluator.evaluate(epoch_num=None, keep_all=True)\n    eval_runtime = time.time() - eval_start_time\n    print()\n    print_str = 'Evaluation Summary: '\n    for k, v in aggr_metrics.items():\n        if v is not None:\n            print_str += '{}: {:8f} | '.format(k, v)\n    logger.info(print_str)\n    logger.info(\"Evaluation runtime: {} hours, {} minutes, {} seconds\\n\".format(*utils.readable_time(eval_runtime)))\n\n    return aggr_metrics, 
per_batch\n\n\ndef validate(val_evaluator, tensorboard_writer, config, best_metrics, best_value, epoch):\n    \"\"\"Run an evaluation on the validation set while logging metrics, and handle outcome\"\"\"\n\n    eval_start_time = time.time()\n    with torch.no_grad():\n        aggr_metrics = val_evaluator.evaluate(epoch)\n    eval_runtime = time.time() - eval_start_time\n\n    global val_times\n    val_times[\"total_time\"] += eval_runtime\n    val_times[\"count\"] += 1\n    avg_val_time = val_times[\"total_time\"] / val_times[\"count\"]\n\n    condition = (aggr_metrics['loss'] < best_value)\n\n    if condition or epoch==1:\n        best_value = aggr_metrics['loss']\n        best_metrics = aggr_metrics.copy()\n\n\n    return aggr_metrics, best_metrics, best_value, condition\n\n\n\ndef check_progress(epoch):\n\n    if epoch in [100, 140, 160, 220, 280, 340]:\n        return True\n    else:\n        return False\n\n\nclass BaseRunner(object):\n\n    def __init__(self, model, dataloader, device, loss_module, optimizer=None, l2_reg=None, print_interval=10, console=True):\n\n        self.model = model\n        self.dataloader = dataloader\n        self.device = device\n        self.optimizer = optimizer\n        self.loss_module = loss_module\n        self.l2_reg = l2_reg\n        self.print_interval = print_interval\n        self.printer = utils.Printer(console=console)\n\n        self.epoch_metrics = OrderedDict()\n\n    def train_epoch(self, epoch_num=None):\n        raise NotImplementedError('Please override in child class')\n\n    def evaluate(self, epoch_num=None, keep_all=True):\n        raise NotImplementedError('Please override in child class')\n\n    def print_callback(self, i_batch, metrics, prefix=''):\n\n        total_batches = len(self.dataloader)\n\n        template = \"{:5.1f}% | batch: {:9d} of {:9d}\"\n        content = [100 * (i_batch / total_batches), i_batch, total_batches]\n        for met_name, met_value in metrics.items():\n            template += 
\"\\t|\\t{}\".format(met_name) + \": {:g}\"\n            content.append(met_value)\n\n        dyn_string = template.format(*content)\n        dyn_string = prefix + dyn_string\n        self.printer.print(dyn_string)\n\n\nclass UnsupervisedRunner(BaseRunner):\n\n    def train_epoch(self, epoch_num=None):\n\n        self.model = self.model.train()\n\n        epoch_loss = 0  # total loss of epoch\n        total_active_elements = 0  # total unmasked elements in epoch\n        for i, batch in enumerate(self.dataloader):\n            X, targets, target_masks, padding_masks, IDs = batch\n            targets = targets.to(self.device)\n            target_masks = target_masks.to(self.device)  # 1s: mask and predict, 0s: unaffected input (ignore)\n            padding_masks = padding_masks.to(self.device)  # 0s: ignore\n\n            predictions = self.model(X.to(self.device), padding_masks)  # (batch_size, padded_length, feat_dim)\n\n            # Cascade noise masks (batch_size, padded_length, feat_dim) and padding masks (batch_size, padded_length)\n            target_masks = target_masks * padding_masks.unsqueeze(-1)\n            loss = self.loss_module(predictions, targets, target_masks)  # (num_active,) individual loss (square error per element) for each active value in batch\n            batch_loss = torch.sum(loss)\n            mean_loss = batch_loss / len(loss)  # mean loss (over active elements) used for optimization\n\n            if self.l2_reg:\n                total_loss = mean_loss + self.l2_reg * l2_reg_loss(self.model)\n            else:\n                total_loss = mean_loss\n\n            # Zero gradients, perform a backward pass, and update the weights.\n            self.optimizer.zero_grad()\n            total_loss.backward()\n\n            # torch.nn.utils.clip_grad_value_(self.model.parameters(), clip_value=1.0)\n            torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=4.0)\n            self.optimizer.step()\n\n            metrics = 
{\"loss\": mean_loss.item()}\n            if i % self.print_interval == 0:\n                ending = \"\" if epoch_num is None else 'Epoch {} '.format(epoch_num)\n                self.print_callback(i, metrics, prefix='Training ' + ending)\n\n            with torch.no_grad():\n                total_active_elements += len(loss)\n                epoch_loss += batch_loss.item()  # add total loss of batch\n\n        epoch_loss = epoch_loss / total_active_elements  # average loss per element for whole epoch\n        self.epoch_metrics['epoch'] = epoch_num\n        self.epoch_metrics['loss'] = epoch_loss\n        return self.epoch_metrics\n\n    def evaluate(self, epoch_num=None, keep_all=False):\n\n        self.model = self.model.eval()\n\n        epoch_loss = 0  # total loss of epoch\n        total_active_elements = 0  # total unmasked elements in epoch\n\n        if keep_all:\n            per_batch = {'target_masks': [], 'targets': [], 'predictions': [], 'metrics': [], 'IDs': []}\n        for i, batch in enumerate(self.dataloader):\n\n            X, targets, target_masks, padding_masks, IDs = batch\n            targets = targets.to(self.device)\n            target_masks = target_masks.to(self.device)  # 1s: mask and predict, 0s: unaffected input (ignore)\n            padding_masks = padding_masks.to(self.device)  # 0s: ignore\n\n            # TODO: for debugging\n            # input_ok = utils.check_tensor(X, verbose=False, zero_thresh=1e-8, inf_thresh=1e4)\n            # if not input_ok:\n            #     print(\"Input problem!\")\n            #     ipdb.set_trace()\n            #\n            # utils.check_model(self.model, verbose=False, stop_on_error=True)\n\n            predictions = self.model(X.to(self.device), padding_masks)  # (batch_size, padded_length, feat_dim)\n\n            # Cascade noise masks (batch_size, padded_length, feat_dim) and padding masks (batch_size, padded_length)\n            target_masks = target_masks * padding_masks.unsqueeze(-1)\n     
       loss = self.loss_module(predictions, targets, target_masks)  # (num_active,) individual loss (square error per element) for each active value in batch\n            batch_loss = torch.sum(loss).cpu().item()\n            mean_loss = batch_loss / len(loss)  # mean loss (over active elements) used for optimization the batch\n\n            if keep_all:\n                per_batch['target_masks'].append(target_masks.cpu().numpy())\n                per_batch['targets'].append(targets.cpu().numpy())\n                per_batch['predictions'].append(predictions.cpu().numpy())\n                per_batch['metrics'].append([loss.cpu().numpy()])\n                per_batch['IDs'].append(IDs)\n\n            metrics = {\"loss\": mean_loss}\n            if i % self.print_interval == 0:\n                ending = \"\" if epoch_num is None else 'Epoch {} '.format(epoch_num)\n                self.print_callback(i, metrics, prefix='Evaluating ' + ending)\n\n            total_active_elements += len(loss)\n            epoch_loss += batch_loss  # add total loss of batch\n\n        epoch_loss = epoch_loss / total_active_elements  # average loss per element for whole epoch\n        self.epoch_metrics['epoch'] = epoch_num\n        self.epoch_metrics['loss'] = epoch_loss\n\n        if keep_all:\n            return self.epoch_metrics, per_batch\n        else:\n            return self.epoch_metrics\n\n\nclass SupervisedRunner(BaseRunner):\n\n    def __init__(self, *args, **kwargs):\n\n        super(SupervisedRunner, self).__init__(*args, **kwargs)\n\n        if isinstance(args[3], torch.nn.CrossEntropyLoss):\n            self.classification = True  # True if classification, False if regression\n            self.analyzer = analysis.Analyzer(print_conf_mat=True)\n        else:\n            self.classification = False\n\n    def train_epoch(self, epoch_num=None):\n\n        self.model = self.model.train()\n\n        epoch_loss = 0  # total loss of epoch\n        total_samples = 0  # total 
samples in epoch\n\n        for i, batch in enumerate(self.dataloader):\n\n            X, targets, padding_masks, IDs = batch\n            targets = targets.to(self.device)\n            padding_masks = padding_masks.to(self.device)  # 0s: ignore\n            # regression: (batch_size, num_labels); classification: (batch_size, num_classes) of logits\n            predictions = self.model(X.to(self.device), padding_masks)\n\n            targets_label = targets.view((1, -1))\n            pred_label = torch.argmax(predictions, axis=1)\n            step_accu = torch.sum(pred_label == targets_label, dim=1)\n\n            loss = self.loss_module(predictions, targets)  # (batch_size,) loss for each sample in the batch\n            batch_loss = torch.sum(loss)\n            mean_loss = batch_loss / len(loss)  # mean loss (over samples) used for optimization\n\n            if self.l2_reg:\n                total_loss = mean_loss + self.l2_reg * l2_reg_loss(self.model)\n            else:\n                total_loss = mean_loss\n\n            # Zero gradients, perform a backward pass, and update the weights.\n            self.optimizer.zero_grad()\n            total_loss.backward()\n\n\n            # torch.nn.utils.clip_grad_value_(self.model.parameters(), clip_value=1.0)\n            torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=4.0)\n            # multi gpu\n            self.optimizer.step()\n            #self.optimizer.step()\n\n            metrics = {\"loss\": mean_loss.item(), \"accuracy\":step_accu.cpu().item()/len(loss)}\n\n            if i % self.print_interval == 0:\n                ending = \"\" if epoch_num is None else 'Epoch {} '.format(epoch_num)\n                self.print_callback(i, metrics, prefix='Supervised Training ' + ending)\n\n            with torch.no_grad():\n                total_samples += len(loss)\n                epoch_loss += batch_loss.item()  # add total loss of batch\n\n        epoch_loss = epoch_loss / total_samples  # 
average loss per sample for whole epoch\n        self.epoch_metrics['epoch'] = epoch_num\n        self.epoch_metrics['loss'] = epoch_loss\n        return self.epoch_metrics\n\n    def evaluate(self, epoch_num=None, keep_all=True):\n\n        self.model = self.model.eval()\n\n        epoch_loss = 0  # total loss of epoch\n        total_samples = 0  # total samples in epoch\n\n        epoch_accu = 0\n        sum_len = 0\n        for i, batch in enumerate(self.dataloader):\n\n            X, targets, padding_masks, IDs = batch # origin:ids\n            targets = targets.to(self.device)\n            padding_masks = padding_masks.to(self.device)  # 0s: ignore\n            # regression: (batch_size, num_labels); classification: (batch_size, num_classes) of logits\n            predictions = self.model(X.to(self.device), padding_masks)\n\n            targets_label = targets.view((1, -1))\n            pred_label = torch.argmax(predictions, axis=1)\n           \n            epoch_accu += torch.sum(pred_label == targets_label, dim=1)\n\n            sum_len += len(targets)\n            loss = self.loss_module(predictions, targets)  # (batch_size,) loss for each sample in the batch\n            batch_loss = torch.sum(loss).cpu().item()\n            mean_loss = batch_loss / len(loss)  # mean loss (over samples)\n\n\n            metrics = {\"loss\": mean_loss, \"accuracy\":epoch_accu.cpu().item()/len(loss)}\n            if i % self.print_interval == 0:\n                ending = \"\" if epoch_num is None else 'Epoch {} '.format(epoch_num)\n                self.print_callback(i, metrics, prefix='Supervised Evaluation ' + ending)\n\n            total_samples += len(loss)\n            epoch_loss += batch_loss  # add total loss of batch\n\n        epoch_loss = epoch_loss / total_samples  # average loss per element for whole epoch\n        epoch_accu = epoch_accu / sum_len\n\n\n        self.epoch_metrics['epoch'] = epoch_num\n        self.epoch_metrics['loss'] = epoch_loss\n        
self.epoch_metrics['accuracy'] = epoch_accu\n\n        return self.epoch_metrics\n"
  },
  {
    "path": "ts_classification_methods/tst_cls/src/utils/__init__.py",
    "content": ""
  },
  {
    "path": "ts_classification_methods/tst_cls/src/utils/analysis.py",
    "content": "\"\"\"\nCollection of functions which enable the evaluation of a classifier's performance,\nby showing confusion matrix, accuracy, recall, precision etc.\n\"\"\"\n\nimport numpy as np\nimport sys\n\nimport matplotlib.pyplot as plt\n\nfrom sklearn import metrics\nfrom tabulate import tabulate\nimport math\nimport logging\nfrom datetime import datetime\n\n\ndef acc_top_k(predictions, y_true):\n    \"\"\"Accuracy when allowing for correct class being in the top k predictions.\n\n    Arguments:\n        predictions: (N_samples, k) array of top class indices (pre-sorted class indices based on score) per sample\n        y_true: N_samples 1D-array of ground truth labels (integer indices)\n    Returns:\n        length k 1D-array of accuracy when allowing for correct class being in top 1, 2, ... k predictions\"\"\"\n\n    y_true = y_true[:, np.newaxis]\n\n    # Create upper triangular matrix of ones, to be used in construction of V\n    building_blocks = np.zeros((predictions.shape[1], predictions.shape[1]))\n    building_blocks[np.triu_indices(predictions.shape[1])] = 1\n\n    # A matrix of the same shape as predictions. For each sample, the index corresponding\n    # to a correct prediction is 1, as well as all following indices.\n    # Example: y_true = [1,0], predictions = [[1 5 4],[2 0 3]]. 
Then: V = [[1 1 1],[0 1 1]]\n    V = np.zeros_like(predictions, dtype=int)  # validity matrix\n    sample_ind, rank_ind = np.where(predictions == y_true)\n\n    V[sample_ind, :] = building_blocks[rank_ind, :]\n\n    return np.mean(V, axis=0)\n\n\ndef accuracy(y_pred, y_true, excluded_labels=None):\n    \"\"\"A simple accuracy calculator, which can ignore labels specified in a list\"\"\"\n\n    if excluded_labels is None:\n        return np.mean(y_pred == y_true)\n    else:\n        included = (y_pred != excluded_labels[0]) & (y_true != excluded_labels[0])\n        # The following extra check (rather than initializing with an array of ones)\n        # is done because a single excluded label is the most common case\n        if len(excluded_labels) > 1:\n            for label in excluded_labels[1:]:\n                included &= (y_pred != label) & (y_true != label)\n\n        return np.mean(y_pred[included] == y_true[included])\n\n\ndef precision(y_true, y_pred, label):\n    \"\"\"Returns precision for the specified class index\"\"\"\n\n    predicted_in_C = (y_pred == label)\n    num_pred_in_C = np.sum(predicted_in_C)\n    if num_pred_in_C == 0:\n        return 0\n    return np.sum(y_true[predicted_in_C] == label) / num_pred_in_C\n\n\ndef recall(y_true, y_pred, label):\n    \"\"\"Returns recall for the specified class index\"\"\"\n\n    truly_in_C = (y_true == label)\n    num_truly_in_C = np.sum(truly_in_C)\n    if num_truly_in_C == 0:\n        return 0  # or NaN?\n    return np.sum(y_pred[truly_in_C] == label) / num_truly_in_C\n\n\ndef limiter(metric_functions, y_true, y_pred, y_scores, score_thr, label):\n    \"\"\"Wraps a list of metric functions, i.e precison or recall, by ingoring predictions under the\n    specified threshold for a specific class.\n    \"\"\"\n\n    ltd_pred = np.copy(y_pred)\n    ltd_pred[(ltd_pred == label) & (y_scores < score_thr)] = -1\n\n    output = [func(y_true, ltd_pred, label) for func in metric_functions]\n\n    return output\n\n\ndef 
prec_rec_parametrized_by_thr(y_true, y_pred, y_scores, label, Npoints, min_score=None, max_score=None):\n    \"\"\"Returns an array showing for a specified class of interest, how precision and recall change as a function of\n        the score threshold (parameter).\n\n    Input:\n        y_true: 1D array of true labels (class indices)\n        y_pred: 1D array of predicted labels (class indices)\n        y_scores: 1D array of scores corresponding to predictions in y_pred\n        label: class label of interest\n        Npoints: number of score threshold points. Defines \"resolution\" of the parameter (score threshold)\n        min_score, max_score: if specified, they impose lower and upper bound limits for the parameter (score thr.)\n    Output:\n        prec_rec: ndarray of shape (Npoints, 2), containing a precision (column 0) and recall (column 1) value for each\n            score threshold value\n    \"\"\"\n\n    if (min_score is None) or (max_score is None):\n        predicted_in_C = (y_pred == label)\n        min_score = 0.99 * np.amin(y_scores[predicted_in_C])  # guarantees that all predictions are kept\n        max_score = 1.01 * np.amax(y_scores[predicted_in_C])  # guarantees that no prediction is kept\n\n    grid = np.linspace(min_score, max_score, Npoints)\n\n    measure = lambda x: limiter([precision, recall], y_true, y_pred, y_scores, x, label)\n\n    return np.array(map(measure, grid)), grid\n\n\ndef plot_prec_vs_rec(score_grid, rec, prec, prec_requirement=None, thr_opt=None, title=None, show=True, save_as=None):\n    \"\"\"Plots a figure depicting precision and recall as a function of the score threshold.\n    Optionally also depicts an imposed precision requirement and a chosen score threshold value.\"\"\"\n\n    if not (thr_opt is None):\n        thr_opt = thr_opt if not (math.isinf(thr_opt)) else None\n\n    plt.figure()\n    if title:\n        plt.suptitle(title)\n\n    # Recall and Precision vs. 
Score Threshold\n    plt.subplot(211)\n    l_rec, = plt.plot(score_grid, rec, '.-')\n\n    plt.hold(True)\n    l_prec, = plt.plot(score_grid, prec, 'g.-')\n    plt.ylim((0, 1.01))\n    plt.xlabel('score threshold')\n\n    legend_lines = [l_rec, l_prec]\n    legend_labels = ['recall', 'precision']\n\n    if prec_requirement:\n        l_prec_req = plt.axhline(prec_requirement, color='r', linestyle='--')\n        legend_lines.append(l_prec_req)\n        legend_labels.append('prec. req.')\n\n    if not (thr_opt is None):\n        l_score_thr = plt.axvline(thr_opt, color='r')\n        legend_lines.append(l_score_thr)\n        legend_labels.append('opt. thr.')\n\n    plt.legend(legend_lines, legend_labels, loc='lower right', fontsize=10)\n\n    # Recall vs. Precision\n    plt.subplot(212)\n    plt.plot(prec, rec, '.-')\n\n    plt.ylim((0, 1.01))\n    plt.xlim((0, 1.01))\n    plt.ylabel('recall')\n    plt.xlabel('precision')\n\n    if prec_requirement:\n        l_prec_req = plt.axvline(prec_requirement, color='r', linestyle='--')\n        plt.legend([l_prec_req], ['precision req.'], loc='lower left', fontsize=10)\n\n    if save_as:\n        plt.savefig(save_as, bbox_inches='tight', format='pdf')\n\n    if show:\n        plt.tight_layout()\n        plt.show(block=False)\n\n\ndef plot_confusion_matrix(ConfMat, label_strings=None, title='Confusion matrix', cmap=plt.cm.get_cmap('Blues')):\n    \"\"\"Plot confusion matrix in a separate window\"\"\"\n    plt.imshow(ConfMat, interpolation='nearest', cmap=cmap)\n    plt.title(title)\n    plt.colorbar()\n    if label_strings:\n        tick_marks = np.arange(len(label_strings))\n        plt.xticks(tick_marks, label_strings, rotation=90)\n        plt.yticks(tick_marks, label_strings)\n    plt.tight_layout()\n    plt.ylabel('True label')\n    plt.xlabel('Predicted label')\n\n\ndef print_confusion_matrix(ConfMat, label_strings=None, title='Confusion matrix'):\n    \"\"\"Print confusion matrix as text to terminal\"\"\"\n\n    if 
label_strings is None:\n        label_strings = ConfMat.shape[0] * ['']\n\n    print(title)\n    print(len(title) * '-')\n    # Make printable matrix:\n    print_mat = []\n    for i, row in enumerate(ConfMat):\n        print_mat.append([label_strings[i]] + list(row))\n    print(tabulate(print_mat, headers=['True\\Pred'] + label_strings, tablefmt='orgtbl'))\n\n\nclass Analyzer(object):\n\n    def __init__(self, maxcharlength=35, plot=False, print_conf_mat=False, output_filepath=None):\n\n        self.maxcharlength = maxcharlength\n        self.plot = plot\n        self.print_conf_mat = print_conf_mat\n\n        # create logger\n        self.logID = str(\n            datetime.now())  # this is to enable individual logging configuration between different instances\n        self.logger = logging.getLogger(self.logID)\n        self.logger.setLevel(logging.INFO)\n        formatter = logging.Formatter('%(message)s')\n\n        # create console handler\n        ch = logging.StreamHandler(sys.stdout)\n        ch.setLevel(logging.INFO)\n        ch.setFormatter(formatter)\n        self.logger.addHandler(ch)\n\n        if output_filepath:\n            # create file handler\n            fh = logging.FileHandler(output_filepath)\n            fh.setLevel(logging.INFO)\n            fh.setFormatter(formatter)\n            self.logger.addHandler(fh)\n\n    def show_acc_top_k_improvement(self, y_pred, y_true, k=5, inp='scores'):\n        \"\"\"\n        Show how accuracy improves when considering the event of the correct label being among the top k predictions as a successful prediction\n        Arguments:\n            k: integer k mentioned above\n            inp: string, one of 'scores' or 'indices', defining assumptions for `y_pred`, see below\n            y_pred: If inp is 'indices', then this is a (N_samples, k) array of top class indices (pre-sorted class indices based on score) per sample\n                If inp is 'scores', then this is assummed to be a (N_samples, C) array 
of class scores per sample, where C is the number of classes\n            y_true: (N_samples,) 1D numpy array of ground truth labels (integer indices)\n        \"\"\"\n\n        print('How accuracy improves when allowing correct result being in the top 1, 2, ..., k predictions:\\n')\n\n        if inp == 'scores':\n            predictions = np.argsort(y_pred, axis=1)[:, ::-1]  # sort in descending order\n        else:\n            predictions = y_pred\n\n        predictions = predictions[:, :min(k, predictions.shape[1])]  # take top k\n\n        accuracy_per_rank = acc_top_k(predictions, y_true)\n\n        row1 = ['k'] + range(1, len(accuracy_per_rank) + 1)\n        row2 = ['Accuracy'] + list(accuracy_per_rank)\n        print(tabulate([row1, row2], tablefmt='orgtbl'))\n\n        if self.plot:\n            from matplotlib.ticker import MaxNLocator\n\n            ax = plt.figure().gca()\n            plt.plot(np.arange(1, k + 1, dtype=int), accuracy_per_rank, '.-')\n            ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n            plt.xlabel('Number of allowed predictions (k)')\n            plt.ylabel('Cumulative accuracy\\n(prob. of correct result being in top k pred.)')\n            plt.title('Cumulative Accuracy vs Number of allowed predictions')\n\n            plt.show(block=False)\n\n        return accuracy_per_rank\n\n    def generate_classification_report(self, digits=3, number_of_thieves=2, maxcharlength=35):\n        \"\"\"\n        Returns a string of a report for given metric arrays (array length equals the number of classes).\n        Called internally by `analyze_classification`.\n            digits: number of digits after . for displaying results\n            number_of_thieves: number of biggest thieves to report\n            maxcharlength: max. 
number of characters to use when displaying thief names\n        \"\"\"\n\n        relative_freq = self.support / np.sum(self.support)  # relative frequencies of each class in the true lables\n        sorted_class_indices = np.argsort(relative_freq)[\n                               ::-1]  # sort by \"importance\" of classes (i.e. occurance frequency)\n\n        last_line_heading = 'avg / total'\n\n        width = max(len(cn) for cn in self.existing_class_names)\n        width = max(width, len(last_line_heading), digits)\n\n        headers = [\"precision\", \"recall\", \"f1-score\", \"rel. freq.\", \"abs. freq.\", \"biggest thieves\"]\n        fmt = '%% %ds' % width  # first column: class name\n        fmt += '  '\n        fmt += ' '.join(['% 10s' for _ in headers[:-1]])\n        fmt += '|\\t % 5s'\n        fmt += '\\n'\n\n        headers = [\"\"] + headers\n        report = fmt % tuple(headers)\n        report += '\\n'\n\n        for i in sorted_class_indices:\n            values = [self.existing_class_names[i]]\n            for v in (self.precision[i], self.recall[i], self.f1[i],\n                      relative_freq[i]):  # v is NOT a tuple, just goes through this list 1 el. at a time\n                values += [\"{0:0.{1}f}\".format(v, digits)]\n            values += [\"{}\".format(self.support[i])]\n            thieves = np.argsort(self.ConfMatrix_normalized_row[i, :])[::-1][\n                      :number_of_thieves + 1]  # other class indices \"stealing\" from class. 
May still contain self\n            thieves = thieves[thieves != i]  # exclude self at this point\n            steal_ratio = self.ConfMatrix_normalized_row[i, thieves]\n            thieves_names = [\n                self.existing_class_names[thief][:min(maxcharlength, len(self.existing_class_names[thief]))] for thief\n                in thieves]  # a little inefficient but inconsequential\n            string_about_stealing = \"\"\n            for j in range(len(thieves)):\n                string_about_stealing += \"{0}: {1:.3f},\\t\".format(thieves_names[j], steal_ratio[j])\n            values += [string_about_stealing]\n\n            report += fmt % tuple(values)\n\n        report += '\\n' + 100 * '-' + '\\n'\n\n        # compute averages/sums\n        values = [last_line_heading]\n        for v in (np.average(self.precision, weights=relative_freq),\n                  np.average(self.recall, weights=relative_freq),\n                  np.average(self.f1, weights=relative_freq)):\n            values += [\"{0:0.{1}f}\".format(v, digits)]\n        values += ['{0}'.format(np.sum(relative_freq))]\n        values += ['{0}'.format(np.sum(self.support))]\n        values += ['']\n\n        # make last (\"Total\") line for report\n        report += fmt % tuple(values)\n\n        return report\n\n    def get_avg_prec_recall(self, ConfMatrix, existing_class_names, excluded_classes=None):\n        \"\"\"Get average recall and precision, using class frequencies as weights, optionally excluding\n        specified classes\"\"\"\n\n        class2ind = dict(zip(existing_class_names, range(len(existing_class_names))))\n        included_c = np.full(len(existing_class_names), 1, dtype=bool)\n\n        if not (excluded_classes is None):\n            excl_ind = [class2ind[excl_class] for excl_class in excluded_classes]\n            included_c[excl_ind] = False\n\n        pred_per_class = np.sum(ConfMatrix, axis=0)\n        nonzero_pred = (pred_per_class > 0)\n\n        included = 
included_c & nonzero_pred\n        support = np.sum(ConfMatrix, axis=1)\n        weights = support[included] / np.sum(support[included])\n\n        prec = np.diag(ConfMatrix[included, :][:, included]) / pred_per_class[included]\n        prec_avg = np.dot(weights, prec)\n\n        # rec = np.diag(ConfMatrix[included_c,:][:,included_c])/support[included_c]\n        rec_avg = np.trace(ConfMatrix[included_c, :][:, included_c]) / np.sum(support[included_c])\n\n        return prec_avg, rec_avg\n\n    def prec_rec_histogram(self, precision, recall, binedges=None):\n        \"\"\"Make a histogram with the distribution of classes with respect to precision and recall\n        \"\"\"\n\n        if binedges is None:\n            binedges = np.concatenate((np.arange(0, 0.6, 0.2), np.arange(0.6, 1.01, 0.1)), axis=0)\n            binedges = np.append(binedges, binedges[-1] + 0.1)  # add 1 extra bin at the end for >= 1\n\n        hist_precision, binedges = np.histogram(precision, binedges)\n        hist_recall, binedges = np.histogram(recall, binedges)\n\n        print(\"\\n\\nDistribution of classes with respect to PRECISION: \")\n        for b in range(len(binedges) - 1):\n            print(\"[{:.1f}, {:.1f}): {}\".format(binedges[b], binedges[b + 1], hist_precision[b]))\n\n        print(\"\\n\\nDistribution of classes with respect to RECALL: \")\n        for b in range(len(binedges) - 1):\n            print(\"[{:.1f}, {:.1f}): {}\".format(binedges[b], binedges[b + 1], hist_recall[b]))\n\n        if self.plot:\n            plt.figure()\n            plt.subplot(121)\n            widths = np.diff(binedges)\n            plt.bar(binedges[:-1], hist_precision, width=widths, align='edge')\n            plt.xlim(0, 1)\n            ax = plt.gca()\n            ax.set_xticks(binedges)\n            plt.xlabel('Precision')\n            plt.ylabel('Number of classes')\n            plt.title(\"Distribution of classes with respect to precision\")\n\n            plt.subplot(122)\n            
widths = np.diff(binedges)\n            plt.bar(binedges[:-1], hist_recall, width=widths, align='edge')\n            plt.xlim(0, 1)\n            ax = plt.gca()\n            ax.set_xticks(binedges)\n            plt.xlabel('Recall')\n            plt.ylabel('Number of classes')\n            plt.title(\"Distribution of classes with respect to recall\")\n\n            plt.show(block=False)\n\n    def analyze_classification(self, y_pred, y_true, class_names, excluded_classes=None):\n        \"\"\"\n        For an array of label predictions and the respective true labels, shows confusion matrix, accuracy, recall, precision etc:\n        Input:\n            y_pred: 1D array of predicted labels (class indices)\n            y_true: 1D array of true labels (class indices)\n            class_names: 1D array or list of class names in the order of class indices.\n                Could also be integers [0, 1, ..., num_classes-1].\n            excluded_classes: list of classes to be excluded from average precision, recall calculation (e.g. 
OTHER)\n        \"\"\"\n\n        # Trim class_names to include only classes existing in y_pred OR y_true\n        in_pred_labels = set(list(y_pred))\n        in_true_labels = set(list(y_true))\n\n        self.existing_class_ind = sorted(list(in_pred_labels | in_true_labels))\n        class_strings = [str(name) for name in class_names]  # needed in case `class_names` elements are not strings\n        self.existing_class_names = [class_strings[ind][:min(self.maxcharlength, len(class_strings[ind]))] for ind in\n                                     self.existing_class_ind]  # a little inefficient but inconsequential\n\n        # Confusion matrix\n        ConfMatrix = metrics.confusion_matrix(y_true, y_pred)\n\n        '''\n        if self.print_conf_mat:\n            print_confusion_matrix(ConfMatrix, label_strings=self.existing_class_names, title='Confusion matrix')\n            print('\\n')\n        if self.plot:\n            plt.figure()\n            plot_confusion_matrix(ConfMatrix, self.existing_class_names)\n        '''\n        # Normalize the confusion matrix by row (i.e by the number of samples in each class)\n        self.ConfMatrix_normalized_row = ConfMatrix.astype('float') / ConfMatrix.sum(axis=1)[:, np.newaxis]\n\n        '''\n        if self.print_conf_mat:\n            print_confusion_matrix(self.ConfMatrix_normalized_row, label_strings=self.existing_class_names,\n                                   title='Confusion matrix normalized by row')\n            print('\\n')\n        if self.plot:\n            plt.figure()\n            plot_confusion_matrix(self.ConfMatrix_normalized_row, label_strings=self.existing_class_names,\n                                  title='Confusion matrix normalized by row')\n\n            plt.show(block=False)\n        '''\n        # Analyze results\n        self.total_accuracy = np.trace(ConfMatrix) / len(y_true)\n        print('Overall accuracy: {:.3f}\\n'.format(self.total_accuracy))\n\n        # returns metrics for each 
class, in the same order as existing_class_names\n        self.precision, self.recall, self.f1, self.support = metrics.precision_recall_fscore_support(y_true, y_pred,\n                                                                                                     labels=self.existing_class_ind)\n\n        # Print report\n        #print(self.generate_classification_report())\n\n        # Calculate average precision and recall\n        self.prec_avg, self.rec_avg = self.get_avg_prec_recall(ConfMatrix, self.existing_class_names, excluded_classes)\n        if excluded_classes:\n            print(\n                \"\\nAverage PRECISION: {:.2f}\\n(using class frequencies as weights, excluding classes with no predictions and predictions in '{}')\".format(\n                    self.prec_avg, ', '.join(excluded_classes)))\n            print(\n                \"\\nAverage RECALL (= ACCURACY): {:.2f}\\n(using class frequencies as weights, excluding classes in '{}')\".format(\n                    self.rec_avg, ', '.join(excluded_classes)))\n\n        # Make a histogram with the distribution of classes with respect to precision and recall\n        self.prec_rec_histogram(self.precision, self.recall)\n\n        return {\"total_accuracy\": self.total_accuracy, \"precision\": self.precision, \"recall\": self.recall,\n                \"f1\": self.f1, \"support\": self.support, \"prec_avg\": self.prec_avg, \"rec_avg\": self.rec_avg}"
  },
  {
    "path": "ts_classification_methods/tst_cls/src/utils/utils.py",
    "content": "import json\nimport os\nimport sys\nimport builtins\nimport functools\nimport time\nimport ipdb\nfrom copy import deepcopy\n\nimport numpy as np\nimport torch\nimport xlrd\nimport xlwt\nfrom xlutils.copy import copy\n\nimport logging\nlogging.basicConfig(format='%(asctime)s | %(levelname)s : %(message)s', level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\ndef timer(func):\n    \"\"\"Print the runtime of the decorated function\"\"\"\n    @functools.wraps(func)\n    def wrapper_timer(*args, **kwargs):\n        start_time = time.perf_counter()    # 1\n        value = func(*args, **kwargs)\n        end_time = time.perf_counter()      # 2\n        run_time = end_time - start_time    # 3\n        print(f\"Finished {func.__name__!r} in {run_time} secs\")\n        return value\n    return wrapper_timer\n\n\ndef save_model(path, epoch, model, optimizer=None):\n    if isinstance(model, torch.nn.DataParallel):\n        state_dict = model.module.state_dict()\n    else:\n        state_dict = model.state_dict()\n    data = {'epoch': epoch,\n            'state_dict': state_dict}\n    if not (optimizer is None):\n        data['optimizer'] = optimizer.state_dict()\n    torch.save(data, path)\n\n\ndef load_model(model, model_path, optimizer=None, resume=False, change_output=False,\n               lr=None, lr_step=None, lr_factor=None):\n    start_epoch = 0\n    checkpoint = torch.load(model_path, map_location=lambda storage, loc: storage)\n    state_dict = deepcopy(checkpoint['state_dict'])\n    if change_output:\n        for key, val in checkpoint['state_dict'].items():\n            if key.startswith('output_layer'):\n                state_dict.pop(key)\n    model.load_state_dict(state_dict, strict=False)\n    print('Loaded model from {}. 
Epoch: {}'.format(model_path, checkpoint['epoch']))\n\n    # resume optimizer parameters\n    if optimizer is not None and resume:\n        if 'optimizer' in checkpoint:\n            optimizer.load_state_dict(checkpoint['optimizer'])\n            start_epoch = checkpoint['epoch']\n            start_lr = lr\n            for i in range(len(lr_step)):\n                if start_epoch >= lr_step[i]:\n                    start_lr *= lr_factor[i]\n            for param_group in optimizer.param_groups:\n                param_group['lr'] = start_lr\n            print('Resumed optimizer with start lr', start_lr)\n        else:\n            print('No optimizer parameters in checkpoint.')\n    if optimizer is not None:\n        return model, optimizer, start_epoch\n    else:\n        return model\n\n\ndef load_config(config_filepath):\n    \"\"\"\n    Using a json file with the master configuration (config file for each part of the pipeline),\n    return a dictionary containing the entire configuration settings in a hierarchical fashion.\n    \"\"\"\n\n    with open(config_filepath) as cnfg:\n        config = json.load(cnfg)\n\n    return config\n\n\ndef create_dirs(dirs):\n    \"\"\"\n    Input:\n        dirs: a list of directories to create, in case these directories are not found\n    Returns:\n        exit_code: 0 if success, -1 if failure\n    \"\"\"\n    try:\n        for dir_ in dirs:\n            if not os.path.exists(dir_):\n                os.makedirs(dir_)\n        return 0\n    except Exception as err:\n        print(\"Creating directories error: {0}\".format(err))\n        exit(-1)\n\n\ndef export_performance_metrics(filepath, metrics_table, header, book=None, sheet_name=\"metrics\"):\n    \"\"\"Exports performance metrics on the validation set for all epochs to an excel file\"\"\"\n\n    if book is None:\n        book = xlwt.Workbook()  # new excel work book\n\n    book = write_table_to_sheet([header] + metrics_table, book, sheet_name=sheet_name)\n\n    
book.save(filepath)\n    logger.info(\"Exported per epoch performance metrics in '{}'\".format(filepath))\n\n    return book\n\n\ndef write_row(sheet, row_ind, data_list):\n    \"\"\"Write a list to row_ind row of an excel sheet\"\"\"\n\n    row = sheet.row(row_ind)\n    for col_ind, col_value in enumerate(data_list):\n        row.write(col_ind, col_value)\n    return\n\n\ndef write_table_to_sheet(table, work_book, sheet_name=None):\n    \"\"\"Writes a table implemented as a list of lists to an excel sheet in the given work book object\"\"\"\n\n    sheet = work_book.add_sheet(sheet_name)\n\n    for row_ind, row_list in enumerate(table):\n        write_row(sheet, row_ind, row_list)\n\n    return work_book\n\n\ndef export_record(filepath, values):\n    \"\"\"Adds a list of values as a bottom row of a table in a given excel file\"\"\"\n\n    read_book = xlrd.open_workbook(filepath, formatting_info=True)\n    read_sheet = read_book.sheet_by_index(0)\n    last_row = read_sheet.nrows\n\n    work_book = copy(read_book)\n    sheet = work_book.get_sheet(0)\n    write_row(sheet, last_row, values)\n    work_book.save(filepath)\n\n\ndef register_record(filepath, timestamp, experiment_name, best_metrics, final_metrics=None, comment=''):\n    \"\"\"\n    Adds the best and final metrics of a given experiment as a record in an excel sheet with other experiment records.\n    Creates excel sheet if it doesn't exist.\n    Args:\n        filepath: path of excel file keeping records\n        timestamp: string\n        experiment_name: string\n        best_metrics: dict of metrics at best epoch {metric_name: metric_value}. Includes \"epoch\" as first key\n        final_metrics: dict of metrics at final epoch {metric_name: metric_value}. 
Includes \"epoch\" as first key\n        comment: optional description\n    \"\"\"\n    metrics_names, metrics_values = zip(*best_metrics.items())\n    row_values = [timestamp, experiment_name, comment] + list(metrics_values)\n    if final_metrics is not None:\n        final_metrics_names, final_metrics_values = zip(*final_metrics.items())\n        row_values += list(final_metrics_values)\n\n    if not os.path.exists(filepath):  # Create a records file for the first time\n        logger.warning(\"Records file '{}' does not exist! Creating new file ...\".format(filepath))\n        directory = os.path.dirname(filepath)\n        if len(directory) and not os.path.exists(directory):\n            os.makedirs(directory)\n        header = [\"Timestamp\", \"Name\", \"Comment\"] + [\"Best \" + m for m in metrics_names]\n        if final_metrics is not None:\n            header += [\"Final \" + m for m in final_metrics_names]\n        book = xlwt.Workbook()  # excel work book\n        book = write_table_to_sheet([header, row_values], book, sheet_name=\"records\")\n        book.save(filepath)\n    else:\n        try:\n            export_record(filepath, row_values)\n        except Exception as x:\n            alt_path = os.path.join(os.path.dirname(filepath), \"record_\" + experiment_name)\n            logger.error(\"Failed saving in: '{}'! Will save here instead: {}\".format(filepath, alt_path))\n            export_record(alt_path, row_values)\n            filepath = alt_path\n\n    logger.info(\"Exported performance record to '{}'\".format(filepath))\n\n\nclass Printer(object):\n    \"\"\"Class for printing output by refreshing the same line in the console, e.g. 
for indicating progress of a process\"\"\"\n\n    def __init__(self, console=True):\n\n        if console:\n            self.print = self.dyn_print\n        else:\n            self.print = builtins.print\n\n    @staticmethod\n    def dyn_print(data):\n        \"\"\"Print things to stdout on one line, refreshing it dynamically\"\"\"\n        sys.stdout.write(\"\\r\\x1b[K\" + data.__str__())\n        sys.stdout.flush()\n\n\ndef readable_time(time_difference):\n    \"\"\"Convert a float measuring time difference in seconds into a tuple of (hours, minutes, seconds)\"\"\"\n\n    hours = time_difference // 3600\n    minutes = (time_difference // 60) % 60\n    seconds = time_difference % 60\n\n    return hours, minutes, seconds\n\n\n# def check_model1(model, verbose=False, stop_on_error=False):\n#     status_ok = True\n#     for name, param in model.named_parameters():\n#         nan_grads = torch.isnan(param.grad)\n#         nan_params = torch.isnan(param)\n#         if nan_grads.any() or nan_params.any():\n#             status_ok = False\n#             print(\"Param {}: {}/{} nan\".format(name, torch.sum(nan_params), param.numel()))\n#             if verbose:\n#                 print(param)\n#             print(\"Grad {}: {}/{} nan\".format(name, torch.sum(nan_grads), param.grad.numel()))\n#             if verbose:\n#                 print(param.grad)\n#             if stop_on_error:\n#                 ipdb.set_trace()\n#     if status_ok:\n#         print(\"Model Check: OK\")\n#     else:\n#         print(\"Model Check: PROBLEM\")\n\n\ndef check_model(model, verbose=False, zero_thresh=1e-8, inf_thresh=1e6, stop_on_error=False):\n    status_ok = True\n    for name, param in model.named_parameters():\n        param_ok = check_tensor(param, verbose=verbose, zero_thresh=zero_thresh, inf_thresh=inf_thresh)\n        if not param_ok:\n            status_ok = False\n            print(\"Parameter '{}' PROBLEM\".format(name))\n        grad_ok = True\n        if param.grad is not 
None:\n            grad_ok = check_tensor(param.grad, verbose=verbose, zero_thresh=zero_thresh, inf_thresh=inf_thresh)\n        if not grad_ok:\n            status_ok = False\n            print(\"Gradient of parameter '{}' PROBLEM\".format(name))\n        if stop_on_error and not (param_ok and grad_ok):\n            ipdb.set_trace()\n\n    if status_ok:\n        print(\"Model Check: OK\")\n    else:\n        print(\"Model Check: PROBLEM\")\n\n\ndef check_tensor(X, verbose=True, zero_thresh=1e-8, inf_thresh=1e6):\n\n    is_nan = torch.isnan(X)\n    if is_nan.any():\n        print(\"{}/{} nan\".format(torch.sum(is_nan), X.numel()))\n        return False\n\n    num_small = torch.sum(torch.abs(X) < zero_thresh)\n    num_large = torch.sum(torch.abs(X) > inf_thresh)\n\n    if verbose:\n        print(\"Shape: {}, {} elements\".format(X.shape, X.numel()))\n        print(\"No 'nan' values\")\n        print(\"Min: {}\".format(torch.min(X)))\n        print(\"Median: {}\".format(torch.median(X)))\n        print(\"Max: {}\".format(torch.max(X)))\n\n        print(\"Histogram of values:\")\n        values = X.view(-1).detach().numpy()\n        hist, binedges = np.histogram(values, bins=20)\n        for b in range(len(binedges) - 1):\n            print(\"[{}, {}): {}\".format(binedges[b], binedges[b + 1], hist[b]))\n\n        print(\"{}/{} abs. values < {}\".format(num_small, X.numel(), zero_thresh))\n        print(\"{}/{} abs. values > {}\".format(num_large, X.numel(), inf_thresh))\n\n    if num_large:\n        print(\"{}/{} abs. 
values > {}\".format(num_large, X.numel(), inf_thresh))\n        return False\n\n    return True\n\n\ndef count_parameters(model, trainable=False):\n    if trainable:\n        return sum(p.numel() for p in model.parameters() if p.requires_grad)\n    else:\n        return sum(p.numel() for p in model.parameters())\n\n\ndef recursively_hook(model, hook_fn):\n    for name, module in model.named_children(): #model._modules.items():\n        if len(list(module.children())) > 0:  # if not leaf node\n            for submodule in module.children():\n                recursively_hook(submodule, hook_fn)\n        else:\n            module.register_forward_hook(hook_fn)\n\n\ndef compute_loss(net: torch.nn.Module,\n                 dataloader: torch.utils.data.DataLoader,\n                 loss_function: torch.nn.Module,\n                 device: torch.device = 'cpu') -> torch.Tensor:\n    \"\"\"Compute the loss of a network on a given dataset.\n\n    Does not compute gradient.\n\n    Parameters\n    ----------\n    net:\n        Network to evaluate.\n    dataloader:\n        Iterator on the dataset.\n    loss_function:\n        Loss function to compute.\n    device:\n        Torch device, or :py:class:`str`.\n\n    Returns\n    -------\n    Loss as a tensor with no grad.\n    \"\"\"\n    running_loss = 0\n    with torch.no_grad():\n        for x, y in dataloader:\n            netout = net(x.to(device)).cpu()\n            running_loss += loss_function(y, netout)\n\n    return running_loss / len(dataloader)\n"
  },
  {
    "path": "ts_classification_methods/tstcc_cls/__init__.py",
    "content": ""
  },
  {
    "path": "ts_classification_methods/tstcc_cls/config_files/ucr_Configs.py",
    "content": "class Config(object):\n    def __init__(self):\n        # model configs\n        self.input_channels = 1\n        self.kernel_size = 8\n        self.stride = 1\n        self.final_out_channels = 128\n\n        self.num_classes = 3\n        self.dropout = 0.35\n        self.features_len = 18\n\n        # training configs\n        self.num_epoch = 600\n\n        # optimizer parameters\n        self.beta1 = 0.9\n        self.beta2 = 0.99\n        self.lr = 3e-4\n\n        # data parameters\n        self.drop_last = True\n        self.batch_size = 128\n\n        self.Context_Cont = Context_Cont_configs()\n        self.TC = TC()\n        self.augmentation = augmentations()\n\n\nclass augmentations(object):\n    def __init__(self):\n        self.jitter_scale_ratio = 1.1\n        self.jitter_ratio = 0.8\n        self.max_seg = 8\n\n\nclass Context_Cont_configs(object):\n    def __init__(self):\n        self.temperature = 0.2\n        self.use_cosine_similarity = True\n\n\nclass TC(object):\n    def __init__(self):\n        self.hidden_dim = 100\n        self.timesteps = 6\n"
  },
  {
    "path": "ts_classification_methods/tstcc_cls/config_files/uea_Configs.py",
    "content": "class Config(object):\n    def __init__(self):\n        # model configs\n        self.input_channels = 9\n        self.kernel_size = 8\n        self.stride = 1\n        self.final_out_channels = 128\n\n        self.num_classes = 6\n        self.dropout = 0.35\n        self.features_len = 18\n\n        # training configs\n        self.num_epoch = 600\n\n        # optimizer parameters\n        self.beta1 = 0.9\n        self.beta2 = 0.99\n        self.lr = 3e-4\n\n        # data parameters\n        self.drop_last = True\n        self.batch_size = 128\n\n        self.Context_Cont = Context_Cont_configs()\n        self.TC = TC()\n        self.augmentation = augmentations()\n\n\nclass augmentations(object):\n    def __init__(self):\n        self.jitter_scale_ratio = 1.1\n        self.jitter_ratio = 0.8\n        self.max_seg = 8\n\n\nclass Context_Cont_configs(object):\n    def __init__(self):\n        self.temperature = 0.2\n        self.use_cosine_similarity = True\n\n\nclass TC(object):\n    def __init__(self):\n        self.hidden_dim = 100\n        self.timesteps = 6\n"
  },
  {
    "path": "ts_classification_methods/tstcc_cls/dataloader/augmentations.py",
    "content": "import numpy as np\nimport torch\n\n\ndef DataTransform(sample, config):\n\n    weak_aug = scaling(sample, config.augmentation.jitter_scale_ratio)\n    strong_aug = jitter(permutation(sample, max_segments=config.augmentation.max_seg), config.augmentation.jitter_ratio)\n\n    return weak_aug, strong_aug\n\n\ndef jitter(x, sigma=0.8):\n    # https://arxiv.org/pdf/1706.00527.pdf\n    return x + np.random.normal(loc=0., scale=sigma, size=x.shape)\n\n\ndef scaling(x, sigma=1.1):\n    # https://arxiv.org/pdf/1706.00527.pdf\n    factor = np.random.normal(loc=2., scale=sigma, size=(x.shape[0], x.shape[2]))\n    ai = []\n    for i in range(x.shape[1]):\n        xi = x[:, i, :]\n        ai.append(np.multiply(xi, factor[:, :])[:, np.newaxis, :])\n    return np.concatenate((ai), axis=1)\n\n\ndef permutation(x, max_segments=5, seg_mode=\"random\"):\n    orig_steps = np.arange(x.shape[2])\n\n    num_segs = np.random.randint(1, max_segments, size=(x.shape[0]))\n\n    ret = np.zeros_like(x)\n    for i, pat in enumerate(x):\n        if num_segs[i] > 1:\n            if seg_mode == \"random\":\n                split_points = np.random.choice(x.shape[2] - 2, num_segs[i] - 1, replace=False)\n                split_points.sort()\n                splits = np.split(orig_steps, split_points)\n            else:\n                splits = np.array_split(orig_steps, num_segs[i])\n            warp = np.concatenate(np.random.permutation(splits)).ravel()\n            ret[i] = pat[0,warp]\n        else:\n            ret[i] = pat\n    return torch.from_numpy(ret)\n\n"
  },
  {
    "path": "ts_classification_methods/tstcc_cls/dataloader/dataloader.py",
    "content": "import os\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data import Dataset\n\nfrom .augmentations import DataTransform\n\n\nclass Load_Dataset(Dataset):\n    # Initialize your data, download, etc.\n    def __init__(self, dataset, config, training_mode):\n        super(Load_Dataset, self).__init__()\n        self.training_mode = training_mode\n\n        X_train = dataset[\"samples\"]\n        y_train = dataset[\"labels\"]\n\n        if len(X_train.shape) < 3:\n            X_train = X_train.unsqueeze(2)\n\n        if X_train.shape.index(min(X_train.shape)) != 1:  # make sure the Channels in second dim\n            X_train = X_train.permute(0, 2, 1)\n\n        elif X_train.shape.index(min(\n                X_train.shape)) == 1 and config.input_channels > 1:  # make sure part uea datasets the Channels in second dim\n            X_train = X_train.permute(0, 2, 1)\n\n        if isinstance(X_train, np.ndarray):\n            self.x_data = torch.from_numpy(X_train)\n            self.y_data = torch.from_numpy(y_train).long()\n        else:\n            self.x_data = X_train\n            self.y_data = y_train\n\n        self.len = X_train.shape[0]\n        if training_mode == \"self_supervised\":  # no need to apply Augmentations in other modes\n            self.aug1, self.aug2 = DataTransform(self.x_data, config)\n\n    def __getitem__(self, index):\n        if self.training_mode == \"self_supervised\":\n            return self.x_data[index], self.y_data[index], self.aug1[index], self.aug2[index]\n        else:\n            return self.x_data[index], self.y_data[index], self.x_data[index], self.x_data[index]\n\n    def __len__(self):\n        return self.len\n\n\ndef data_generator(data_path, configs, training_mode):\n    train_dataset = torch.load(os.path.join(data_path, \"train.pt\"))\n    valid_dataset = torch.load(os.path.join(data_path, \"val.pt\"))\n    test_dataset = torch.load(os.path.join(data_path, 
\"test.pt\"))\n\n    print(type(train_dataset[\"samples\"]))\n    print(\"Data shape = \", train_dataset[\"samples\"].shape, valid_dataset[\"samples\"].shape,\n          test_dataset[\"samples\"].shape)\n\n    train_dataset = Load_Dataset(train_dataset, configs, training_mode)\n    valid_dataset = Load_Dataset(valid_dataset, configs, training_mode)\n    test_dataset = Load_Dataset(test_dataset, configs, training_mode)\n\n    train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=configs.batch_size,\n                                               shuffle=True, drop_last=configs.drop_last,\n                                               num_workers=0)\n    valid_loader = torch.utils.data.DataLoader(dataset=valid_dataset, batch_size=configs.batch_size,\n                                               shuffle=False, drop_last=configs.drop_last,\n                                               num_workers=0)\n\n    test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=configs.batch_size,\n                                              shuffle=False, drop_last=False,\n                                              num_workers=0)\n\n    return train_loader, valid_loader, test_loader\n"
  },
  {
    "path": "ts_classification_methods/tstcc_cls/main.py",
    "content": "import torch\n\nimport os\nimport numpy as np\nfrom datetime import datetime\nimport argparse\nfrom tstcc_cls.utils import _logger, set_requires_grad\nfrom tstcc_cls.dataloader.dataloader import data_generator\nfrom tstcc_cls.trainer.trainer import Trainer, model_evaluate\nfrom tstcc_cls.models.TC import TC\nfrom tstcc_cls.utils import _calc_metrics, copy_Files\nfrom tstcc_cls.models.model import base_Model\n# Args selections\nstart_time = datetime.now()\n\n\nparser = argparse.ArgumentParser()\n\n######################## Model parameters ########################\nhome_dir = os.getcwd()\nparser.add_argument('--experiment_description', default='Exp1', type=str,\n                    help='Experiment Description')\nparser.add_argument('--run_description', default='run1', type=str,\n                    help='Experiment Description')\nparser.add_argument('--seed', default=0, type=int,\n                    help='seed value')\nparser.add_argument('--training_mode', default='supervised', type=str,\n                    help='Modes of choice: random_init, supervised, self_supervised, fine_tune, train_linear')\nparser.add_argument('--selected_dataset', default='Epilepsy', type=str,\n                    help='Dataset of choice: sleepEDF, HAR, Epilepsy, pFD')\nparser.add_argument('--logs_save_dir', default='experiments_logs', type=str,\n                    help='saving directory')\nparser.add_argument('--device', default='cuda', type=str,\n                    help='cpu or cuda')\nparser.add_argument('--home_path', default=home_dir, type=str,\n                    help='Project home directory')\nargs = parser.parse_args()\n\n\n\ndevice = torch.device(args.device)\nexperiment_description = args.experiment_description\ndata_type = args.selected_dataset\nmethod = 'TS-TCC'\ntraining_mode = args.training_mode\nrun_description = args.run_description\n\nlogs_save_dir = args.logs_save_dir\nos.makedirs(logs_save_dir, exist_ok=True)\n\n\nexec(f'from 
config_files.{data_type}_Configs import Config as Configs')\nconfigs = Configs()\n\n# ##### fix random seeds for reproducibility ########\nSEED = args.seed\ntorch.manual_seed(SEED)\ntorch.backends.cudnn.deterministic = False\ntorch.backends.cudnn.benchmark = False\nnp.random.seed(SEED)\n#####################################################\n\nexperiment_log_dir = os.path.join(logs_save_dir, experiment_description, run_description, training_mode + f\"_seed_{SEED}\")\nos.makedirs(experiment_log_dir, exist_ok=True)\n\n# loop through domains\ncounter = 0\nsrc_counter = 0\n\n\n# Logging\nlog_file_name = os.path.join(experiment_log_dir, f\"logs_{datetime.now().strftime('%d_%m_%Y_%H_%M_%S')}.log\")\nlogger = _logger(log_file_name)\nlogger.debug(\"=\" * 45)\nlogger.debug(f'Dataset: {data_type}')\nlogger.debug(f'Method:  {method}')\nlogger.debug(f'Mode:    {training_mode}')\nlogger.debug(\"=\" * 45)\n\n# Load datasets\ndata_path = f\"./data/{data_type}\"\ntrain_dl, valid_dl, test_dl = data_generator(data_path, configs, training_mode)\nlogger.debug(\"Data loaded ...\")\n\n# Load Model\nmodel = base_Model(configs).to(device)\ntemporal_contr_model = TC(configs, device).to(device)\n\nif training_mode == \"fine_tune\":\n    # load saved model of this experiment\n    load_from = os.path.join(os.path.join(logs_save_dir, experiment_description, run_description, f\"self_supervised_seed_{SEED}\", \"saved_models\"))\n    chkpoint = torch.load(os.path.join(load_from, \"ckp_last.pt\"), map_location=device)\n    pretrained_dict = chkpoint[\"model_state_dict\"]\n    model_dict = model.state_dict()\n    del_list = ['logits']\n    pretrained_dict_copy = pretrained_dict.copy()\n    for i in pretrained_dict_copy.keys():\n        for j in del_list:\n            if j in i:\n                del pretrained_dict[i]\n    model_dict.update(pretrained_dict)\n    model.load_state_dict(model_dict)\n\nif training_mode == \"train_linear\" or \"tl\" in training_mode:\n    load_from = 
os.path.join(os.path.join(logs_save_dir, experiment_description, run_description, f\"self_supervised_seed_{SEED}\", \"saved_models\"))\n    chkpoint = torch.load(os.path.join(load_from, \"ckp_last.pt\"), map_location=device)\n    pretrained_dict = chkpoint[\"model_state_dict\"]\n    model_dict = model.state_dict()\n\n    # 1. filter out unnecessary keys\n    pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}\n\n    # delete these parameters (Ex: the linear layer at the end)\n    del_list = ['logits']\n    pretrained_dict_copy = pretrained_dict.copy()\n    for i in pretrained_dict_copy.keys():\n        for j in del_list:\n            if j in i:\n                del pretrained_dict[i]\n\n    model_dict.update(pretrained_dict)\n    model.load_state_dict(model_dict)\n    set_requires_grad(model, pretrained_dict, requires_grad=False)  # Freeze everything except last layer.\n\nif training_mode == \"random_init\":\n    model_dict = model.state_dict()\n\n    # delete all the parameters except for logits\n    del_list = ['logits']\n    pretrained_dict_copy = model_dict.copy()\n    for i in pretrained_dict_copy.keys():\n        for j in del_list:\n            if j in i:\n                del model_dict[i]\n    set_requires_grad(model, model_dict, requires_grad=False)  # Freeze everything except last layer.\n\n\n\nmodel_optimizer = torch.optim.Adam(model.parameters(), lr=configs.lr, betas=(configs.beta1, configs.beta2), weight_decay=3e-4)\ntemporal_contr_optimizer = torch.optim.Adam(temporal_contr_model.parameters(), lr=configs.lr, betas=(configs.beta1, configs.beta2), weight_decay=3e-4)\n\nif training_mode == \"self_supervised\":  # to do it only once\n    copy_Files(os.path.join(logs_save_dir, experiment_description, run_description), data_type)\n\n# Trainer\nTrainer(model, temporal_contr_model, model_optimizer, temporal_contr_optimizer, train_dl, valid_dl, test_dl, device, logger, configs, experiment_log_dir, training_mode)\n\nif training_mode 
!= \"self_supervised\":\n    # Testing\n    outs = model_evaluate(model, temporal_contr_model, test_dl, device, training_mode)\n    total_loss, total_acc, pred_labels, true_labels = outs\n    _calc_metrics(pred_labels, true_labels, experiment_log_dir, args.home_path)\n\nlogger.debug(f\"Training time is : {datetime.now()-start_time}\")\n"
  },
  {
    "path": "ts_classification_methods/tstcc_cls/main_ucr.py",
    "content": "import argparse\nimport os\nimport sys\nimport time\nfrom datetime import datetime\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\nimport numpy as np\nimport torch\n\nfrom data.preprocessing import load_data, k_fold, normalize_per_series, fill_nan_value, transfer_labels\nfrom tsm_utils import save_cls_result, set_seed\nfrom tstcc_cls.models.TC import TC\nfrom tstcc_cls.models.model import base_Model\nfrom tstcc_cls.trainer.trainer import Trainer_cls\nfrom tstcc_cls.utils import _logger, generator_ucr, generator_ucr_config\n\n# Args selections\nstart_time = datetime.now()\n\nparser = argparse.ArgumentParser()\n\n######################## Model parameters ########################\nhome_dir = os.getcwd()\nparser.add_argument('--experiment_description', default='Exp1', type=str,\n                    help='Experiment Description')\nparser.add_argument('--run_description', default='run1', type=str,\n                    help='Experiment Description')\nparser.add_argument('--seed', default=42, type=int,\n                    help='seed value')\nparser.add_argument('--random_seed', type=int, default=42, help='The random seed')\nparser.add_argument('--training_mode', default='self_supervised', type=str,\n                    help='Modes of choice: random_init, supervised, self_supervised, fine_tune, train_linear')\nparser.add_argument('--selected_dataset', default='ucr', type=str,\n                    help='Dataset of choice: sleepEDF, HAR, Epilepsy, pFD')  ## HAR\nparser.add_argument('--dataset', default='CBF', type=str,\n                    help='Dataset of choice: sleepEDF, HAR, Epilepsy, pFD')\nparser.add_argument('--logs_save_dir', default='experiments_logs', type=str,\n                    help='saving directory')\nparser.add_argument('--device', default='cuda:0', type=str,\n                    help='cpu or cuda')\nparser.add_argument('--home_path', default=home_dir, type=str,\n     
               help='Project home directory')\nparser.add_argument('--save_csv_name', type=str, default='test_tstcc_ucr_0424_')\nparser.add_argument('--save_dir', type=str, default='/SSD/lz/time_tsm/tstcc_cls/result')\nargs = parser.parse_args()\nset_seed(args)\n\ndevice = torch.device(args.device)\nexperiment_description = args.experiment_description\ndata_type = args.selected_dataset\nmethod = 'TS-TCC'\ntraining_mode = args.training_mode\nrun_description = args.run_description\n\nlogs_save_dir = args.logs_save_dir\nos.makedirs(logs_save_dir, exist_ok=True)\n\nexec(f'from config_files.{data_type}_Configs import Config as Configs')\nconfigs = Configs()\n\n# ##### fix random seeds for reproducibility ########\nSEED = args.seed\ntorch.manual_seed(SEED)\ntorch.backends.cudnn.deterministic = False\ntorch.backends.cudnn.benchmark = False\nnp.random.seed(SEED)\n#####################################################\n\nexperiment_log_dir = os.path.join(logs_save_dir, experiment_description, run_description,\n                                  training_mode + f\"_seed_{SEED}\")\nos.makedirs(experiment_log_dir, exist_ok=True)\n\n# loop through domains\ncounter = 0\nsrc_counter = 0\n\n# Logging\nlog_file_name = os.path.join(experiment_log_dir, f\"logs_{datetime.now().strftime('%d_%m_%Y_%H_%M_%S')}.log\")\nlogger = _logger(log_file_name)\nlogger.debug(\"=\" * 45)\nlogger.debug(f'Dataset: {data_type}')\nlogger.debug(f'Method:  {method}')\nlogger.debug(f'Mode:    {training_mode}')\nlogger.debug(\"=\" * 45)\n\n# Load datasets\ndata_path = f\"./data/{data_type}\"\n\nsum_dataset, sum_target, num_classes = load_data(\n    dataroot='/SSD/lz/UCRArchive_2018',\n    dataset=args.dataset)\nsum_target = transfer_labels(sum_target)\n# sum_dataset = sum_dataset[..., np.newaxis]\ntrain_datasets, train_targets, val_datasets, val_targets, test_datasets, test_targets = k_fold(\n    sum_dataset, sum_target)\n# print(\"Start features_len = \", configs.features_len, \", num_classes = \", 
configs.num_classes)\ngenerator_ucr_config(data=train_datasets[0], label=train_targets[0], configs=configs)\nif train_datasets[0].shape[1] < 30:\n    configs.TC.timesteps = 1\n# print(\"End features_len = \", configs.features_len, \", num_classes = \", configs.num_classes)\n# train_dl, valid_dl, test_dl = data_generator(data_path, configs, training_mode)\ntrain_accuracies = []\nval_accuracies = []\ntest_accuracies = []\nt = time.time()\nfor i in range(5):\n    ### mean impute\n    train_data, val_data, test_data = fill_nan_value(train_datasets[i], val_datasets[i], test_datasets[i])\n\n    ### normalize\n    train_data = normalize_per_series(train_data)\n    val_data = normalize_per_series(val_data)\n    test_data = normalize_per_series(test_data)\n\n    train_data = train_data[..., np.newaxis]\n    val_data = val_data[..., np.newaxis]\n    test_data = test_data[..., np.newaxis]\n\n    train_dl = generator_ucr(data=train_data, label=train_targets[i],\n                             configs=configs, training_mode='self_supervised', drop_last=True)\n    valid_dl = generator_ucr(data=val_data, label=val_targets[i],\n                             configs=configs, training_mode='self_supervised', drop_last=False)\n    test_dl = generator_ucr(data=test_data, label=test_targets[i],\n                            configs=configs, training_mode='self_supervised', drop_last=False)\n    logger.debug(\"Data loaded ...\")\n\n    # Load Model\n    model = base_Model(configs).to(device)\n    temporal_contr_model = TC(configs, device).to(device)\n\n    model_optimizer = torch.optim.Adam(model.parameters(), lr=configs.lr, betas=(configs.beta1, configs.beta2),\n                                       weight_decay=3e-4)\n    temporal_contr_optimizer = torch.optim.Adam(temporal_contr_model.parameters(), lr=configs.lr,\n                                                betas=(configs.beta1, configs.beta2), weight_decay=3e-4)\n\n    # copy_Files(os.path.join(logs_save_dir, 
experiment_description, run_description), data_type) # to do it only once\n\n    # self_supervised Trainer\n    Trainer_cls(model, temporal_contr_model, model_optimizer, temporal_contr_optimizer, train_dl, valid_dl,\n                test_dl, device, logger, configs, experiment_log_dir, training_mode='self_supervised')\n    print(\"Self_supervised end, start fine_tune!\")\n    # fine_tune Trainer\n    train_dl = generator_ucr(data=train_data, label=train_targets[i],\n                             configs=configs, training_mode='fine_tune', drop_last=True)\n    valid_dl = generator_ucr(data=val_data, label=val_targets[i],\n                             configs=configs, training_mode='fine_tune', drop_last=False)\n    test_dl = generator_ucr(data=test_data, label=test_targets[i],\n                            configs=configs, training_mode='fine_tune', drop_last=False)\n\n    train_acc, val_acc, test_acc = Trainer_cls(model, temporal_contr_model, model_optimizer, temporal_contr_optimizer,\n                                               train_dl, valid_dl,\n                                               test_dl, device, logger, configs, experiment_log_dir,\n                                               training_mode='fine_tune')\n    # print(type(train_acc.data), train_acc.numpy(), val_acc.numpy(), test_acc.numpy())\n    # train_accuracies = torch.Tensor(train_accuracies)\n    # test_accuracies = torch.Tensor(test_accuracies)\n    train_accuracies.append(train_acc.item())\n    val_accuracies.append(val_acc.item())\n    test_accuracies.append(test_acc.item())\n\ntrain_time = time.time() - t\nprint(\"train_accuracies = \", train_accuracies, len(train_accuracies))\ntest_accuracies = torch.Tensor(test_accuracies)\nsave_cls_result(args, test_accu=torch.mean(test_accuracies), test_std=torch.std(test_accuracies),\n                train_time=train_time / 5, end_val_epoch=0.0, seeds=args.seed)\nlogger.debug(f\"Training time is : {datetime.now() - start_time}\")\n"
  },
  {
    "path": "ts_classification_methods/tstcc_cls/main_uea.py",
    "content": "import argparse\nimport os\nimport sys\nimport time\nfrom datetime import datetime\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\nimport numpy as np\nimport torch\n\nfrom data.preprocessing import k_fold, load_UEA, fill_nan_value, normalize_uea_set\nfrom tsm_utils import save_cls_result, set_seed\nfrom tstcc_cls.models.TC import TC\nfrom tstcc_cls.models.model import base_Model\nfrom tstcc_cls.trainer.trainer import Trainer_cls\nfrom tstcc_cls.utils import _logger, generator_uea_config, generator_uea\n\n# Args selections\nstart_time = datetime.now()\n\nparser = argparse.ArgumentParser()\n\n######################## Model parameters ########################\nhome_dir = os.getcwd()\nparser.add_argument('--experiment_description', default='Exp1', type=str,\n                    help='Experiment Description')\nparser.add_argument('--run_description', default='run1', type=str,\n                    help='Experiment Description')\nparser.add_argument('--seed', default=42, type=int,\n                    help='seed value')\nparser.add_argument('--random_seed', type=int, default=42, help='The random seed')\nparser.add_argument('--training_mode', default='self_supervised', type=str,\n                    help='Modes of choice: random_init, supervised, self_supervised, fine_tune, train_linear')\nparser.add_argument('--selected_dataset', default='uea', type=str,\n                    help='Dataset of choice: sleepEDF, HAR, Epilepsy, pFD')  ## HAR\nparser.add_argument('--dataset', default='EigenWorms', type=str,\n                    help='Dataset of choice: sleepEDF, HAR, Epilepsy, pFD')\nparser.add_argument('--logs_save_dir', default='experiments_logs', type=str,\n                    help='saving directory')\nparser.add_argument('--device', default='cuda:1', type=str,\n                    help='cpu or cuda')\nparser.add_argument('--home_path', default=home_dir, type=str,\n                   
 help='Project home directory')\nparser.add_argument('--save_csv_name', type=str, default='test_tstcc_uea_0425_')\nparser.add_argument('--save_dir', type=str, default='/SSD/lz/time_tsm/tstcc_cls/result')\nargs = parser.parse_args()\nset_seed(args)\n\ndevice = torch.device(args.device)\nexperiment_description = args.experiment_description\ndata_type = args.selected_dataset\nmethod = 'TS-TCC'\ntraining_mode = args.training_mode\nrun_description = args.run_description\n\nlogs_save_dir = args.logs_save_dir\nos.makedirs(logs_save_dir, exist_ok=True)\n\nexec(f'from config_files.{data_type}_Configs import Config as Configs')\nconfigs = Configs()\n\n# ##### fix random seeds for reproducibility ########\nSEED = args.seed\ntorch.manual_seed(SEED)\ntorch.backends.cudnn.deterministic = False\ntorch.backends.cudnn.benchmark = False\nnp.random.seed(SEED)\n#####################################################\n\nexperiment_log_dir = os.path.join(logs_save_dir, experiment_description, run_description,\n                                  training_mode + f\"_seed_{SEED}\")\nos.makedirs(experiment_log_dir, exist_ok=True)\n\n# loop through domains\ncounter = 0\nsrc_counter = 0\n\n# Logging\nlog_file_name = os.path.join(experiment_log_dir, f\"logs_{datetime.now().strftime('%d_%m_%Y_%H_%M_%S')}.log\")\nlogger = _logger(log_file_name)\nlogger.debug(\"=\" * 45)\nlogger.debug(f'Dataset: {data_type}')\nlogger.debug(f'Method:  {method}')\nlogger.debug(f'Mode:    {training_mode}')\nlogger.debug(\"=\" * 45)\n\n# Load datasets\ndata_path = f\"./data/{data_type}\"\n\nsum_dataset, sum_target, num_classes = load_UEA(\n    dataroot='/SSD/lz/Multivariate2018_arff',\n    dataset=args.dataset)\n# sum_dataset = sum_dataset[..., np.newaxis]\ntrain_datasets, train_targets, val_datasets, val_targets, test_datasets, test_targets = k_fold(\n    sum_dataset, sum_target)\n# print(\"Start features_len = \", configs.features_len, \", num_classes = \", 
configs.num_classes)\ngenerator_uea_config(data=train_datasets[0], label=train_targets[0], configs=configs)\nif args.dataset == 'EigenWorms':\n    configs.augmentation.max_seg = 5\n    configs.batch_size = 8\nif train_datasets[0].shape[1] <= 30:\n    configs.TC.timesteps = 1\n# print(\"End features_len = \", configs.features_len, \", num_classes = \", configs.num_classes, \", input_channels = \",\n#       configs.input_channels)\n# train_dl, valid_dl, test_dl = data_generator(data_path, configs, training_mode)\ntrain_accuracies = []\nval_accuracies = []\ntest_accuracies = []\nt = time.time()\nfor i in range(5):\n    ### mean impute\n    train_data, val_data, test_data = fill_nan_value(train_datasets[i], val_datasets[i], test_datasets[i])\n\n    ### normalize\n    train_data = normalize_uea_set(train_data)\n    val_data = normalize_uea_set(val_data)\n    test_data = normalize_uea_set(test_data)\n\n    # train_data = train_data[..., np.newaxis]\n    # val_data = val_data[..., np.newaxis]\n    # test_data = test_data[..., np.newaxis]\n\n    train_dl = generator_uea(data=train_data, label=train_targets[i],\n                             configs=configs, training_mode='self_supervised', drop_last=True)\n    valid_dl = generator_uea(data=val_data, label=val_targets[i],\n                             configs=configs, training_mode='self_supervised', drop_last=False)\n    test_dl = generator_uea(data=test_data, label=test_targets[i],\n                            configs=configs, training_mode='self_supervised', drop_last=False)\n    logger.debug(\"Data loaded ...\")\n\n    # Load Model\n    model = base_Model(configs).to(device)\n    temporal_contr_model = TC(configs, device).to(device)\n\n    model_optimizer = torch.optim.Adam(model.parameters(), lr=configs.lr, betas=(configs.beta1, configs.beta2),\n                                       weight_decay=3e-4)\n    temporal_contr_optimizer = torch.optim.Adam(temporal_contr_model.parameters(), lr=configs.lr,\n                    
                            betas=(configs.beta1, configs.beta2), weight_decay=3e-4)\n\n    # copy_Files(os.path.join(logs_save_dir, experiment_description, run_description), data_type) # to do it only once\n\n    # self_supervised Trainer\n    Trainer_cls(model, temporal_contr_model, model_optimizer, temporal_contr_optimizer, train_dl, valid_dl,\n                test_dl, device, logger, configs, experiment_log_dir, training_mode='self_supervised')\n    print(\"Self_supervised end, start fine_tune!\")\n    # fine_tune Trainer\n    train_dl = generator_uea(data=train_data, label=train_targets[i],\n                             configs=configs, training_mode='fine_tune', drop_last=True)\n    valid_dl = generator_uea(data=val_data, label=val_targets[i],\n                             configs=configs, training_mode='fine_tune', drop_last=False)\n    test_dl = generator_uea(data=test_data, label=test_targets[i],\n                            configs=configs, training_mode='fine_tune', drop_last=False)\n\n    train_acc, val_acc, test_acc = Trainer_cls(model, temporal_contr_model, model_optimizer, temporal_contr_optimizer,\n                                               train_dl, valid_dl,\n                                               test_dl, device, logger, configs, experiment_log_dir,\n                                               training_mode='fine_tune')\n    # print(type(train_acc.data), train_acc.numpy(), val_acc.numpy(), test_acc.numpy())\n    # train_accuracies = torch.Tensor(train_accuracies)\n    # test_accuracies = torch.Tensor(test_accuracies)\n    train_accuracies.append(train_acc.item())\n    val_accuracies.append(val_acc.item())\n    test_accuracies.append(test_acc.item())\n\ntrain_time = time.time() - t\nprint(\"train_accuracies = \", train_accuracies, len(train_accuracies))\ntest_accuracies = torch.Tensor(test_accuracies)\nsave_cls_result(args, test_accu=torch.mean(test_accuracies), test_std=torch.std(test_accuracies),\n                
train_time=train_time / 5, end_val_epoch=0.0, seeds=args.seed)\nlogger.debug(f\"Training time is : {datetime.now() - start_time}\")\n"
  },
  {
    "path": "ts_classification_methods/tstcc_cls/models/TC.py",
    "content": "import torch\nimport torch.nn as nn\nimport numpy as np\nfrom .attention import Seq_Transformer\n\n\n\nclass TC(nn.Module):\n    def __init__(self, configs, device):\n        super(TC, self).__init__()\n        self.num_channels = configs.final_out_channels\n        self.timestep = configs.TC.timesteps\n        self.Wk = nn.ModuleList([nn.Linear(configs.TC.hidden_dim, self.num_channels) for i in range(self.timestep)])\n        self.lsoftmax = nn.LogSoftmax()\n        self.device = device\n        \n        self.projection_head = nn.Sequential(\n            nn.Linear(configs.TC.hidden_dim, configs.final_out_channels // 2),\n            nn.BatchNorm1d(configs.final_out_channels // 2),\n            nn.ReLU(inplace=True),\n            nn.Linear(configs.final_out_channels // 2, configs.final_out_channels // 4),\n        )\n\n        self.seq_transformer = Seq_Transformer(patch_size=self.num_channels, dim=configs.TC.hidden_dim, depth=4, heads=4, mlp_dim=64)\n\n    def forward(self, features_aug1, features_aug2):\n        z_aug1 = features_aug1  # features are (batch_size, #channels, seq_len)\n        seq_len = z_aug1.shape[2]\n        z_aug1 = z_aug1.transpose(1, 2)\n\n        z_aug2 = features_aug2\n        z_aug2 = z_aug2.transpose(1, 2)\n\n        batch = z_aug1.shape[0]\n        t_samples = torch.randint(seq_len - self.timestep, size=(1,)).long().to(self.device)  # randomly pick time stamps\n\n        nce = 0  # average over timestep and batch\n        encode_samples = torch.empty((self.timestep, batch, self.num_channels)).float().to(self.device)\n\n        for i in np.arange(1, self.timestep + 1):\n            encode_samples[i - 1] = z_aug2[:, t_samples + i, :].view(batch, self.num_channels)\n        forward_seq = z_aug1[:, :t_samples + 1, :]\n\n        c_t = self.seq_transformer(forward_seq)\n\n        pred = torch.empty((self.timestep, batch, self.num_channels)).float().to(self.device)\n        for i in np.arange(0, self.timestep):\n            
linear = self.Wk[i]\n            pred[i] = linear(c_t)\n        for i in np.arange(0, self.timestep):\n            total = torch.mm(encode_samples[i], torch.transpose(pred[i], 0, 1))\n            nce += torch.sum(torch.diag(self.lsoftmax(total)))\n        nce /= -1. * batch * self.timestep\n        return nce, self.projection_head(c_t)"
  },
  {
    "path": "ts_classification_methods/tstcc_cls/models/attention.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom einops import rearrange, repeat\n\n\n########################################################################################\n\nclass Residual(nn.Module):\n    def __init__(self, fn):\n        super().__init__()\n        self.fn = fn\n\n    def forward(self, x, **kwargs):\n        return self.fn(x, **kwargs) + x\n\n\nclass PreNorm(nn.Module):\n    def __init__(self, dim, fn):\n        super().__init__()\n        self.norm = nn.LayerNorm(dim)\n        self.fn = fn\n\n    def forward(self, x, **kwargs):\n        return self.fn(self.norm(x), **kwargs)\n\n\nclass FeedForward(nn.Module):\n    def __init__(self, dim, hidden_dim, dropout=0.):\n        super().__init__()\n        self.net = nn.Sequential(\n            nn.Linear(dim, hidden_dim),\n            nn.ReLU(),\n            nn.Dropout(dropout),\n            nn.Linear(hidden_dim, dim),\n            nn.Dropout(dropout)\n        )\n\n    def forward(self, x):\n        return self.net(x)\n\n\nclass Attention(nn.Module):\n    def __init__(self, dim, heads=8, dropout=0.):\n        super().__init__()\n        self.heads = heads\n        self.scale = dim ** -0.5\n\n        self.to_qkv = nn.Linear(dim, dim * 3, bias=False)\n        self.to_out = nn.Sequential(\n            nn.Linear(dim, dim),\n            nn.Dropout(dropout)\n        )\n\n    def forward(self, x, mask=None):\n        b, n, _, h = *x.shape, self.heads\n        qkv = self.to_qkv(x).chunk(3, dim=-1)\n        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), qkv)\n\n        dots = torch.einsum('bhid,bhjd->bhij', q, k) * self.scale\n\n        if mask is not None:\n            mask = F.pad(mask.flatten(1), (1, 0), value=True)\n            assert mask.shape[-1] == dots.shape[-1], 'mask has incorrect dimensions'\n            mask = mask[:, None, :] * mask[:, :, None]\n            dots.masked_fill_(~mask, float('-inf'))\n            del mask\n\n        attn 
= dots.softmax(dim=-1)\n\n        out = torch.einsum('bhij,bhjd->bhid', attn, v)\n        out = rearrange(out, 'b h n d -> b n (h d)')\n        out = self.to_out(out)\n        return out\n\n\nclass Transformer(nn.Module):\n    def __init__(self, dim, depth, heads, mlp_dim, dropout):\n        super().__init__()\n        self.layers = nn.ModuleList([])\n        for _ in range(depth):\n            self.layers.append(nn.ModuleList([\n                Residual(PreNorm(dim, Attention(dim, heads=heads, dropout=dropout))),\n                Residual(PreNorm(dim, FeedForward(dim, mlp_dim, dropout=dropout)))\n            ]))\n\n    def forward(self, x, mask=None):\n        for attn, ff in self.layers:\n            x = attn(x, mask=mask)\n            x = ff(x)\n        return x\n\n\nclass Seq_Transformer(nn.Module):\n    def __init__(self, *, patch_size, dim, depth, heads, mlp_dim, channels=1, dropout=0.1):\n        super().__init__()\n        patch_dim = channels * patch_size\n        self.patch_to_embedding = nn.Linear(patch_dim, dim)\n        self.c_token = nn.Parameter(torch.randn(1, 1, dim))\n        self.transformer = Transformer(dim, depth, heads, mlp_dim, dropout)\n        self.to_c_token = nn.Identity()\n\n\n    def forward(self, forward_seq):\n        x = self.patch_to_embedding(forward_seq)\n        b, n, _ = x.shape\n        c_tokens = repeat(self.c_token, '() n d -> b n d', b=b)\n        x = torch.cat((c_tokens, x), dim=1)\n        x = self.transformer(x)\n        c_t = self.to_c_token(x[:, 0])\n        return c_t\n"
  },
  {
    "path": "ts_classification_methods/tstcc_cls/models/loss.py",
    "content": "import torch\nimport numpy as np\n\nclass NTXentLoss(torch.nn.Module):\n\n    def __init__(self, device, batch_size, temperature, use_cosine_similarity):\n        super(NTXentLoss, self).__init__()\n        self.batch_size = batch_size\n        self.temperature = temperature\n        self.device = device\n        self.softmax = torch.nn.Softmax(dim=-1)\n        self.mask_samples_from_same_repr = self._get_correlated_mask().type(torch.bool)\n        self.similarity_function = self._get_similarity_function(use_cosine_similarity)\n        self.criterion = torch.nn.CrossEntropyLoss(reduction=\"sum\")\n\n    def _get_similarity_function(self, use_cosine_similarity):\n        if use_cosine_similarity:\n            self._cosine_similarity = torch.nn.CosineSimilarity(dim=-1)\n            return self._cosine_simililarity\n        else:\n            return self._dot_simililarity\n\n    def _get_correlated_mask(self):\n        diag = np.eye(2 * self.batch_size)\n        l1 = np.eye((2 * self.batch_size), 2 * self.batch_size, k=-self.batch_size)\n        l2 = np.eye((2 * self.batch_size), 2 * self.batch_size, k=self.batch_size)\n        mask = torch.from_numpy((diag + l1 + l2))\n        mask = (1 - mask).type(torch.bool)\n        return mask.to(self.device)\n\n    @staticmethod\n    def _dot_simililarity(x, y):\n        v = torch.tensordot(x.unsqueeze(1), y.T.unsqueeze(0), dims=2)\n        # x shape: (N, 1, C)\n        # y shape: (1, C, 2N)\n        # v shape: (N, 2N)\n        return v\n\n    def _cosine_simililarity(self, x, y):\n        # x shape: (N, 1, C)\n        # y shape: (1, 2N, C)\n        # v shape: (N, 2N)\n        v = self._cosine_similarity(x.unsqueeze(1), y.unsqueeze(0))\n        return v\n\n    def forward(self, zis, zjs):\n        representations = torch.cat([zjs, zis], dim=0)\n\n        similarity_matrix = self.similarity_function(representations, representations)\n\n        # filter out the scores from the positive samples\n        l_pos = 
torch.diag(similarity_matrix, self.batch_size)\n        r_pos = torch.diag(similarity_matrix, -self.batch_size)\n        positives = torch.cat([l_pos, r_pos]).view(2 * self.batch_size, 1)\n\n        negatives = similarity_matrix[self.mask_samples_from_same_repr].view(2 * self.batch_size, -1)\n\n        logits = torch.cat((positives, negatives), dim=1)\n        logits /= self.temperature\n\n        labels = torch.zeros(2 * self.batch_size).to(self.device).long()\n        loss = self.criterion(logits, labels)\n\n        return loss / (2 * self.batch_size)\n"
  },
  {
    "path": "ts_classification_methods/tstcc_cls/models/model.py",
    "content": "from torch import nn\n\nclass base_Model(nn.Module):\n    def __init__(self, configs):\n        super(base_Model, self).__init__()\n\n        self.conv_block1 = nn.Sequential(\n            nn.Conv1d(configs.input_channels, 32, kernel_size=configs.kernel_size,\n                      stride=configs.stride, bias=False, padding=(configs.kernel_size//2)),\n            nn.BatchNorm1d(32),\n            nn.ReLU(),\n            nn.MaxPool1d(kernel_size=2, stride=2, padding=1),\n            nn.Dropout(configs.dropout)\n        )\n\n        self.conv_block2 = nn.Sequential(\n            nn.Conv1d(32, 64, kernel_size=8, stride=1, bias=False, padding=4),\n            nn.BatchNorm1d(64),\n            nn.ReLU(),\n            nn.MaxPool1d(kernel_size=2, stride=2, padding=1)\n        )\n\n        self.conv_block3 = nn.Sequential(\n            nn.Conv1d(64, configs.final_out_channels, kernel_size=8, stride=1, bias=False, padding=4),\n            nn.BatchNorm1d(configs.final_out_channels),\n            nn.ReLU(),\n            nn.MaxPool1d(kernel_size=2, stride=2, padding=1),\n        )\n\n        model_output_dim = configs.features_len\n        self.logits = nn.Linear(model_output_dim * configs.final_out_channels, configs.num_classes)\n\n    def forward(self, x_in):\n        x = self.conv_block1(x_in)\n        x = self.conv_block2(x)\n        x = self.conv_block3(x)\n\n        x_flat = x.reshape(x.shape[0], -1)\n        logits = self.logits(x_flat)\n        return logits, x\n"
  },
  {
    "path": "ts_classification_methods/tstcc_cls/result/tstcc_0327_cls_result.csv",
    "content": "id,dataset_name,test_accuracy,test_std,val_accuracy,val_std,train_accuracy,train_std\n0,ACSF1,0.775,0.0631,0.605,0.0622,0.545,0.0326\n1,Adiac,0.6396,0.0847,0.5136,0.1267,0.42,0.1494\n2,AllGestureWiimoteX,0.9602,0.0207,0.7524,0.0441,0.7311,0.0156\n3,AllGestureWiimoteY,0.9887,0.0061,0.7964,0.0199,0.7592,0.0235\n4,AllGestureWiimoteZ,0.9195,0.0121,0.6970000000000001,0.0369,0.6619,0.0402\n5,ArrowHead,0.8781,0.0487,0.8689,0.0773,0.8296,0.0447\n6,BME,1.0,0.0,1.0,0.0,0.9833,0.0152\n7,Beef,0.6938,0.2374,0.7,0.2327,0.6167,0.1918\n8,BeetleFly,0.875,0.2119,0.85,0.0559,0.775,0.1046\n9,BirdChicken,0.625,0.0884,0.775,0.1046,0.625,0.1768\n10,CBF,0.9918,0.0067,1.0,0.0,0.9959,0.0071\n11,Car,0.8781,0.0777,0.8333,0.1021,0.7167,0.0903\n12,Chinatown,0.925,0.0711,0.9863,0.0137,0.9589,0.0168\n13,ChlorineConcentration,0.8224,0.0745,0.8707,0.0967,0.8708,0.0886\n14,CinCECGTorso,0.9966,0.0045,1.0,0.0,0.9984,0.0035\n15,Coffee,0.975,0.0261,0.9667,0.0745,0.9455,0.122\n16,Computers,0.7242,0.069,0.64,0.0534,0.616,0.0586\n17,CricketX,0.9885,0.006,0.8144,0.0222,0.7398,0.0312\n18,CricketY,0.9901,0.0081,0.7881,0.0364,0.7398,0.044\n19,CricketZ,0.9896,0.0135,0.7931,0.0385,0.746,0.0428\n20,Crop,0.9055,0.0058,0.7884,0.0041,0.7849,0.0049\n21,DiatomSizeReduction,0.9656,0.0423,0.9846,0.0188,0.9752,0.0235\n22,DistalPhalanxOutlineAgeGroup,0.8352,0.0209,0.85,0.0152,0.8125,0.0616\n23,DistalPhalanxOutlineCorrect,0.8188,0.0215,0.8011,0.0204,0.7497,0.0523\n24,DistalPhalanxTW,0.8086,0.0135,0.8148,0.0131,0.7736,0.0353\n25,DodgerLoopDay,0.8062,0.0915,0.65,0.0677,0.5194,0.0572\n26,DodgerLoopGame,0.975,0.0178,0.9187,0.0568,0.8919,0.0439\n27,DodgerLoopWeekend,0.9969,0.006999999999999999,0.975,0.0559,0.955,0.054000000000000006\n28,ECG200,0.9031,0.0639,0.865,0.0548,0.845,0.0597\n29,ECG5000,0.9696,0.0038,0.9582,0.0056,0.9548,0.0075\n30,ECGFiveDays,0.9937,0.004,1.0,0.0,0.9956,0.0058\n31,EOGHorizontalSignal,0.9599,0.0105,0.7643,0.0477,0.6314,0.1079\n32,EOGVerticalSignal,0.9339,0.0194,0.7233,0.0313,0.6072,0.0428
\n33,Earthquakes,0.7969,0.1324,0.8125,0.0126,0.7657,0.0249\n34,ElectricDevices,0.9396,0.004,0.8748,0.0027,0.8687,0.0039\n35,EthanolLevel,0.4219,0.1368,0.3994,0.1388,0.3571,0.1544\n36,FaceAll,1.0,0.0,0.9918,0.0025,0.9883,0.006999999999999999\n37,FaceFour,0.975,0.0392,0.9739,0.0238,0.9375,0.0758\n38,FacesUCR,1.0,0.0,0.9945,0.0035,0.9934,0.0043\n39,FiftyWords,0.9984,0.0021,0.7995,0.0183,0.7732,0.0498\n40,Fish,0.9219,0.0475,0.8629,0.048,0.8171,0.0383\n41,FordA,0.9601,0.0121,0.9364,0.0073,0.9339,0.0057\n42,FordB,0.9262,0.0092,0.9151,0.0121,0.9105,0.008\n43,FreezerRegularTrain,0.9878,0.0072,0.9994,0.0009,0.9974,0.0029\n44,FreezerSmallTrain,0.9864,0.0045,0.9997,0.0007,0.9972,0.002\n45,Fungi,0.9875,0.0204,1.0,0.0,0.9705,0.0207\n46,GestureMidAirD1,0.8938,0.0232,0.6147,0.0366,0.618,0.0548\n47,GestureMidAirD2,0.9031,0.0458,0.5353,0.0369,0.4943,0.0612\n48,GestureMidAirD3,0.7641,0.08199999999999999,0.3765,0.0246,0.2959,0.048\n49,GesturePebbleZ1,0.9969,0.0043,0.9311,0.0214,0.9208,0.0532\n50,GesturePebbleZ2,0.9984,0.0035,0.9279,0.034,0.8883,0.0453\n51,GunPoint,0.975,0.036000000000000004,0.995,0.0112,0.955,0.0411\n52,GunPointAgeSpan,0.932,0.0556,0.9758,0.018000000000000002,0.9423,0.0215\n53,GunPointMaleVersusFemale,0.9719,0.0216,0.9978,0.0049,0.9845,0.0061\n54,GunPointOldVersusYoung,0.9688,0.01,0.967,0.0077,0.949,0.033\n55,Ham,0.925,0.0704,0.8837,0.0368,0.8272,0.0686\n56,HandOutlines,0.9044,0.0121,0.924,0.0157,0.8896,0.0235\n57,Haptics,0.718,0.0537,0.5118,0.0345,0.527,0.0278\n58,Herring,0.6875,0.0834,0.7769,0.0918,0.5228,0.0507\n59,HouseTwenty,0.9906,0.013999999999999999,0.8938,0.0474,0.8931,0.0521\n60,InlineSkate,0.8490000000000001,0.1152,0.7344,0.035,0.4828,0.2514\n61,InsectEPGRegularTrain,0.9969,0.0043,0.895,0.0285,0.8748,0.044000000000000004\n62,InsectEPGSmallTrain,0.9563,0.0251,0.8213,0.0381,0.8008,0.0307\n63,InsectWingbeatSound,0.8664,0.0328,0.7403,0.0088,0.7114,0.0256\n64,ItalyPowerDemand,0.9547,0.0177,0.9767,0.0068,0.964,0.0185\n65,LargeKitchenAppliances,0.9271,0.0214,0.795
9999999999999,0.048,0.7008,0.0477\n66,Lightning2,0.7406,0.0559,0.8063,0.0823,0.7437,0.0749\n67,Lightning7,0.9187,0.0461,0.7793,0.0393,0.735,0.1006\n68,Mallat,0.9912,0.003,0.9913,0.006999999999999999,0.9874,0.0068\n69,Meat,0.4406,0.2404,0.5167,0.2545,0.5083,0.2981\n70,MedicalImages,0.9172,0.0195,0.843,0.015,0.8082,0.0297\n71,MelbournePedestrian,0.0996,0.0006,0.1051,0.0002,0.1012,0.0012\n72,MiddlePhalanxOutlineAgeGroup,0.7453,0.0306,0.7838,0.0064,0.7562,0.0419\n73,MiddlePhalanxOutlineCorrect,0.8336,0.0208,0.8502,0.0288,0.7903,0.0257\n74,MiddlePhalanxTW,0.6461,0.0273,0.6486,0.0377,0.6329,0.0295\n75,MixedShapesRegularTrain,0.9988,0.0019,0.9443,0.0065,0.9361,0.008\n76,MixedShapesSmallTrain,0.9902,0.0109,0.9342,0.0239,0.9264,0.0157\n77,MoteStrain,0.9828,0.0109,0.9601,0.013999999999999999,0.9434,0.0222\n78,NonInvasiveFetalECGThorax1,0.9821,0.0063,0.9287,0.0047,0.9245,0.0086\n79,NonInvasiveFetalECGThorax2,0.9856,0.0049,0.9377,0.0073,0.9348,0.0038\n80,OSULeaf,0.982,0.0245,0.7371,0.0682,0.6606,0.0348\n81,OliveOil,0.2625,0.114,0.4667,0.1118,0.4167,0.0\n82,PLAID,0.6081,0.0191,0.4855,0.0193,0.4714,0.0395\n83,PhalangesOutlinesCorrect,0.8111,0.0249,0.823,0.0265,0.8021,0.0188\n84,Phoneme,1.0,0.0,0.3883,0.0092,0.3596,0.0197\n85,PickupGestureWiimoteZ,0.9875,0.027999999999999997,0.76,0.0548,0.76,0.0962\n86,PigAirwayPressure,0.7,0.1537,0.0889,0.0241,0.0577,0.0291\n87,PigArtPressure,0.7188,0.1625,0.1587,0.0159,0.0929,0.0175\n88,PigCVP,0.8766,0.0925,0.0921,0.0207,0.0481,0.0228\n89,Plane,0.9719,0.0131,1.0,0.0,0.9762,0.0168\n90,PowerCons,0.9828,0.0102,0.9722,0.017,0.9639,0.0158\n91,ProximalPhalanxOutlineAgeGroup,0.8102,0.0287,0.838,0.0259,0.8479,0.0296\n92,ProximalPhalanxOutlineCorrect,0.8262,0.0373,0.8516,0.0531,0.8259,0.0415\n93,ProximalPhalanxTW,0.7969,0.0199,0.7868,0.0108,0.7702,0.0123\n94,RefrigerationDevices,0.9307,0.0346,0.6851,0.0174,0.5464,0.0827\n95,Rock,0.9812,0.027999999999999997,0.8571,0.0875,0.7571,0.1083\n96,ScreenType,0.7604,0.0629,0.5844,0.0213,0.4532,0.0327\n97,SemgHandGe
nderCh2,0.991,0.0161,0.9653,0.0094,0.9415,0.0269\n98,SemgHandMovementCh2,1.0,0.0,0.746,0.0235,0.6829999999999999,0.0612\n99,SemgHandSubjectCh2,1.0,0.0,0.9508,0.0194,0.9326,0.018000000000000002\n100,ShakeGestureWiimoteZ,0.9563,0.0474,0.86,0.0894,0.85,0.0612\n101,ShapeletSim,0.8875,0.1766,0.67,0.1178,0.625,0.0919\n102,ShapesAll,0.9881,0.0024,0.7865,0.0186,0.7776,0.0191\n103,SmallKitchenAppliances,0.9125,0.0577,0.8063,0.0164,0.7274,0.0449\n104,SmoothSubspace,0.9844,0.0096,0.9433,0.0508,0.9333,0.0635\n105,SonyAIBORobotSurface1,0.9953,0.0043,0.9872,0.0091,0.9807,0.0122\n106,SonyAIBORobotSurface2,0.993,0.0085,0.9891,0.0101,0.9886,0.0037\n107,StarLightCurves,0.9742,0.0098,0.9740000000000001,0.003,0.972,0.0048\n108,Strawberry,0.9391,0.0133,0.9768,0.0134,0.9589,0.0143\n109,SwedishLeaf,0.9875,0.0053,0.9649,0.0122,0.9474,0.0212\n110,Symbols,0.9984,0.0016,0.9906,0.0045,0.978,0.0154\n111,SyntheticControl,0.9883,0.0048,0.995,0.0046,0.99,0.006999999999999999\n112,ToeSegmentation1,0.9844,0.0166,0.9481,0.0203,0.9033,0.0418\n113,ToeSegmentation2,0.9469,0.0559,0.9055,0.0317,0.7947,0.0823\n114,Trace,0.9438,0.0831,0.93,0.0326,0.88,0.0542\n115,TwoLeadECG,0.9756,0.0208,0.9992,0.0017,0.9956,0.0055\n116,TwoPatterns,0.9994,0.0006,1.0,0.0,0.9988,0.0027\n117,UMD,0.9906,0.013999999999999999,0.9944,0.0124,0.95,0.0362\n118,UWaveGestureLibraryAll,0.9993,0.0016,0.975,0.0034,0.9737,0.0063\n119,UWaveGestureLibraryX,0.9337,0.0228,0.852,0.0051,0.841,0.0134\n120,UWaveGestureLibraryY,0.9223,0.0306,0.7786,0.0141,0.7711,0.0154\n121,UWaveGestureLibraryZ,0.9059,0.0214,0.7777,0.0114,0.7713,0.01\n122,Wafer,0.9991,0.0008,0.9993,0.0009,0.9987,0.0005\n123,Wine,0.5219,0.036000000000000004,0.5787,0.052000000000000005,0.5134,0.0674\n124,WordSynonyms,0.9949,0.0043,0.772,0.0433,0.7445,0.0086\n125,Worms,0.9406,0.0301,0.6038,0.0823,0.5615,0.069\n126,WormsTwoClass,0.8313,0.1518,0.7115,0.0804,0.6744,0.0402\n127,Yoga,0.9683,0.0204,0.9393,0.0334,0.9249,0.0327\n"
  },
  {
    "path": "ts_classification_methods/tstcc_cls/scripts/fivefold_tstcc_ucr.sh",
    "content": "python main_ucr.py --dataset ACSF1 --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset Adiac --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset AllGestureWiimoteX --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset AllGestureWiimoteY --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset AllGestureWiimoteZ --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset ArrowHead --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset BME --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset Beef --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset BeetleFly --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset BirdChicken --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset CBF --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset Car --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset Chinatown --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset ChlorineConcentration --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset CinCECGTorso --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset Coffee --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset Computers --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset CricketX --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset CricketY --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset CricketZ --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 
42;\npython main_ucr.py --dataset Crop --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset DiatomSizeReduction --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset DistalPhalanxOutlineAgeGroup --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset DistalPhalanxOutlineCorrect --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset DistalPhalanxTW --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset DodgerLoopDay --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset DodgerLoopGame --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset DodgerLoopWeekend --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset ECG200 --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset ECG5000 --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset ECGFiveDays --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset EOGHorizontalSignal --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset EOGVerticalSignal --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset Earthquakes --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset ElectricDevices --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset EthanolLevel --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset FaceAll --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset FaceFour --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset FacesUCR --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py 
--dataset FiftyWords --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset Fish --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset FordA --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset FordB --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset FreezerRegularTrain --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset FreezerSmallTrain --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset Fungi --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset GestureMidAirD1 --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset GestureMidAirD2 --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset GestureMidAirD3 --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset GesturePebbleZ1 --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset GesturePebbleZ2 --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset GunPoint --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset GunPointAgeSpan --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset GunPointMaleVersusFemale --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset GunPointOldVersusYoung --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset Ham --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset HandOutlines --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset Haptics --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset Herring --device cuda:0 --save_csv_name 
tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset HouseTwenty --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset InlineSkate --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset InsectEPGRegularTrain --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset InsectEPGSmallTrain --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset InsectWingbeatSound --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset ItalyPowerDemand --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset LargeKitchenAppliances --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset Lightning2 --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset Lightning7 --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset Mallat --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset Meat --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset MedicalImages --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset MelbournePedestrian --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset MiddlePhalanxOutlineAgeGroup --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset MiddlePhalanxOutlineCorrect --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset MiddlePhalanxTW --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset MixedShapesRegularTrain --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset MixedShapesSmallTrain --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset MoteStrain --device cuda:0 
--save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset NonInvasiveFetalECGThorax1 --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset NonInvasiveFetalECGThorax2 --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset OSULeaf --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset OliveOil --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset PLAID --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset PhalangesOutlinesCorrect --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset Phoneme --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset PickupGestureWiimoteZ --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset PigAirwayPressure --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset PigArtPressure --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset PigCVP --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset Plane --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset PowerCons --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset ProximalPhalanxOutlineAgeGroup --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset ProximalPhalanxOutlineCorrect --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset ProximalPhalanxTW --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset RefrigerationDevices --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset Rock --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset ScreenType --device cuda:0 
--save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset SemgHandGenderCh2 --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset SemgHandMovementCh2 --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset SemgHandSubjectCh2 --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset ShakeGestureWiimoteZ --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset ShapeletSim --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset ShapesAll --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset SmallKitchenAppliances --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset SmoothSubspace --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset SonyAIBORobotSurface1 --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset SonyAIBORobotSurface2 --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset StarLightCurves --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset Strawberry --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset SwedishLeaf --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset Symbols --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset SyntheticControl --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset ToeSegmentation1 --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset ToeSegmentation2 --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset Trace --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset TwoLeadECG --device cuda:0 
--save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset TwoPatterns --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset UMD --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset UWaveGestureLibraryAll --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset UWaveGestureLibraryX --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset UWaveGestureLibraryY --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset UWaveGestureLibraryZ --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset Wafer --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset Wine --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset WordSynonyms --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset Worms --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset WormsTwoClass --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\npython main_ucr.py --dataset Yoga --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42;\n"
  },
  {
    "path": "ts_classification_methods/tstcc_cls/scripts/fivefold_tstcc_uea.sh",
    "content": "python main_uea.py --dataset ArticularyWordRecognition --device cuda:0 --save_csv_name tstcc_uea_0423_ --seed 42;\npython main_uea.py --dataset AtrialFibrillation --device cuda:0 --save_csv_name tstcc_uea_0423_ --seed 42;\npython main_uea.py --dataset BasicMotions --device cuda:0 --save_csv_name tstcc_uea_0423_ --seed 42;\npython main_uea.py --dataset CharacterTrajectories --device cuda:0 --save_csv_name tstcc_uea_0423_ --seed 42;\npython main_uea.py --dataset Cricket --device cuda:0 --save_csv_name tstcc_uea_0423_ --seed 42;\npython main_uea.py --dataset DuckDuckGeese --device cuda:0 --save_csv_name tstcc_uea_0423_ --seed 42;\npython main_uea.py --dataset EigenWorms --device cuda:0 --save_csv_name tstcc_uea_0423_ --seed 42;\npython main_uea.py --dataset Epilepsy --device cuda:0 --save_csv_name tstcc_uea_0423_ --seed 42;\npython main_uea.py --dataset EthanolConcentration --device cuda:0 --save_csv_name tstcc_uea_0423_ --seed 42;\npython main_uea.py --dataset ERing --device cuda:0 --save_csv_name tstcc_uea_0423_ --seed 42;\npython main_uea.py --dataset FaceDetection --device cuda:0 --save_csv_name tstcc_uea_0423_ --seed 42;\npython main_uea.py --dataset FingerMovements --device cuda:0 --save_csv_name tstcc_uea_0423_ --seed 42;\npython main_uea.py --dataset HandMovementDirection --device cuda:0 --save_csv_name tstcc_uea_0423_ --seed 42;\npython main_uea.py --dataset Handwriting --device cuda:0 --save_csv_name tstcc_uea_0423_ --seed 42;\npython main_uea.py --dataset Heartbeat --device cuda:0 --save_csv_name tstcc_uea_0423_ --seed 42;\npython main_uea.py --dataset InsectWingbeat --device cuda:0 --save_csv_name tstcc_uea_0423_ --seed 42;\npython main_uea.py --dataset JapaneseVowels --device cuda:0 --save_csv_name tstcc_uea_0423_ --seed 42;\npython main_uea.py --dataset Libras --device cuda:0 --save_csv_name tstcc_uea_0423_ --seed 42;\npython main_uea.py --dataset LSST --device cuda:0 --save_csv_name tstcc_uea_0423_ --seed 42;\npython main_uea.py 
--dataset MotorImagery --device cuda:0 --save_csv_name tstcc_uea_0423_ --seed 42;\npython main_uea.py --dataset NATOPS --device cuda:0 --save_csv_name tstcc_uea_0423_ --seed 42;\npython main_uea.py --dataset PenDigits --device cuda:0 --save_csv_name tstcc_uea_0423_ --seed 42;\npython main_uea.py --dataset PEMS-SF --device cuda:0 --save_csv_name tstcc_uea_0423_ --seed 42;\npython main_uea.py --dataset PhonemeSpectra --device cuda:0 --save_csv_name tstcc_uea_0423_ --seed 42;\npython main_uea.py --dataset RacketSports --device cuda:0 --save_csv_name tstcc_uea_0423_ --seed 42;\npython main_uea.py --dataset SelfRegulationSCP1 --device cuda:0 --save_csv_name tstcc_uea_0423_ --seed 42;\npython main_uea.py --dataset SelfRegulationSCP2 --device cuda:0 --save_csv_name tstcc_uea_0423_ --seed 42;\npython main_uea.py --dataset SpokenArabicDigits --device cuda:0 --save_csv_name tstcc_uea_0423_ --seed 42;\npython main_uea.py --dataset StandWalkJump --device cuda:0 --save_csv_name tstcc_uea_0423_ --seed 42;\npython main_uea.py --dataset UWaveGestureLibrary --device cuda:0 --save_csv_name tstcc_uea_0423_ --seed 42;\n"
  },
  {
    "path": "ts_classification_methods/tstcc_cls/scripts/generator_ucr.py",
    "content": "ucr_dataset = ['ACSF1', 'Adiac', 'AllGestureWiimoteX', 'AllGestureWiimoteY', 'AllGestureWiimoteZ', 'ArrowHead', 'BME',\n               'Beef',\n               'BeetleFly', 'BirdChicken', 'CBF', 'Car', 'Chinatown', 'ChlorineConcentration', 'CinCECGTorso', 'Coffee',\n               'Computers',\n               'CricketX', 'CricketY', 'CricketZ', 'Crop', 'DiatomSizeReduction', 'DistalPhalanxOutlineAgeGroup',\n               'DistalPhalanxOutlineCorrect', 'DistalPhalanxTW', 'DodgerLoopDay', 'DodgerLoopGame', 'DodgerLoopWeekend',\n               'ECG200', 'ECG5000', 'ECGFiveDays', 'EOGHorizontalSignal', 'EOGVerticalSignal', 'Earthquakes',\n               'ElectricDevices',\n               'EthanolLevel', 'FaceAll', 'FaceFour', 'FacesUCR', 'FiftyWords', 'Fish', 'FordA', 'FordB',\n               'FreezerRegularTrain',\n               'FreezerSmallTrain', 'Fungi', 'GestureMidAirD1', 'GestureMidAirD2', 'GestureMidAirD3', 'GesturePebbleZ1',\n               'GesturePebbleZ2', 'GunPoint', 'GunPointAgeSpan', 'GunPointMaleVersusFemale', 'GunPointOldVersusYoung',\n               'Ham',\n               'HandOutlines', 'Haptics', 'Herring', 'HouseTwenty', 'InlineSkate', 'InsectEPGRegularTrain',\n               'InsectEPGSmallTrain',\n               'InsectWingbeatSound', 'ItalyPowerDemand', 'LargeKitchenAppliances', 'Lightning2', 'Lightning7',\n               'Mallat', 'Meat',\n               'MedicalImages', 'MelbournePedestrian', 'MiddlePhalanxOutlineAgeGroup', 'MiddlePhalanxOutlineCorrect',\n               'MiddlePhalanxTW', 'MixedShapesRegularTrain', 'MixedShapesSmallTrain', 'MoteStrain',\n               'NonInvasiveFetalECGThorax1',\n               'NonInvasiveFetalECGThorax2', 'OSULeaf', 'OliveOil', 'PLAID', 'PhalangesOutlinesCorrect', 'Phoneme',\n               'PickupGestureWiimoteZ', 'PigAirwayPressure', 'PigArtPressure', 'PigCVP', 'Plane', 'PowerCons',\n               'ProximalPhalanxOutlineAgeGroup', 'ProximalPhalanxOutlineCorrect', 'ProximalPhalanxTW',\n 
              'RefrigerationDevices',\n               'Rock', 'ScreenType', 'SemgHandGenderCh2', 'SemgHandMovementCh2', 'SemgHandSubjectCh2',\n               'ShakeGestureWiimoteZ',\n               'ShapeletSim', 'ShapesAll', 'SmallKitchenAppliances', 'SmoothSubspace', 'SonyAIBORobotSurface1',\n               'SonyAIBORobotSurface2', 'StarLightCurves', 'Strawberry', 'SwedishLeaf', 'Symbols', 'SyntheticControl',\n               'ToeSegmentation1', 'ToeSegmentation2', 'Trace', 'TwoLeadECG', 'TwoPatterns', 'UMD',\n               'UWaveGestureLibraryAll',\n               'UWaveGestureLibraryX', 'UWaveGestureLibraryY', 'UWaveGestureLibraryZ', 'Wafer', 'Wine', 'WordSynonyms',\n               'Worms',\n               'WormsTwoClass', 'Yoga']\n\ni = 1\nfor dataset in ucr_dataset:\n    print(\"i = \", i, \", dataset = \", dataset)\n    ## python main_ucr.py --dataset Coffee  --device cuda:1 --save_csv_name tstcc_0327_ --seed 42\n    with open('/SSD/lz/time_tsm/tstcc_cls/scripts/fivefold_tstcc_ucr.sh', 'a') as f:\n        f.write(\n            'python main_ucr.py --dataset ' + dataset + ' --device cuda:0 --save_csv_name tstcc_ucr_0423_ --seed 42' + ';\\n')\n\n    i = i + 1\n\n    ## nohup ./scripts/fivefold_tstcc_ucr.sh &\n"
  },
  {
    "path": "ts_classification_methods/tstcc_cls/scripts/generator_uea.py",
    "content": "uea_all = ['ArticularyWordRecognition', 'AtrialFibrillation', 'BasicMotions', 'CharacterTrajectories',\n           'Cricket', 'DuckDuckGeese', 'EigenWorms', 'Epilepsy', 'EthanolConcentration', 'ERing',\n           'FaceDetection', 'FingerMovements', 'HandMovementDirection', 'Handwriting',\n           'Heartbeat', 'InsectWingbeat', 'JapaneseVowels', 'Libras', 'LSST', 'MotorImagery',\n           'NATOPS', 'PenDigits', 'PEMS-SF', 'PhonemeSpectra', 'RacketSports', 'SelfRegulationSCP1',\n           'SelfRegulationSCP2', 'SpokenArabicDigits', 'StandWalkJump', 'UWaveGestureLibrary']\n\ni = 1\nfor dataset in uea_all:\n    print(\"i = \", i, \", dataset = \", dataset)\n    ## python main_uea.py --dataset BasicMotions  --device cuda:0 --save_csv_name tstcc_uea_0423_ --seed 42\n    with open('/SSD/lz/time_tsm/tstcc_cls/scripts/fivefold_tstcc_uea.sh', 'a') as f:\n        f.write(\n            'python main_uea.py --dataset ' + dataset + ' --device cuda:0 --save_csv_name tstcc_uea_0423_ --seed 42' + ';\\n')\n\n    i = i + 1\n\n    ## nohup ./scripts/fivefold_tstcc_uea.sh &\n"
  },
  {
    "path": "ts_classification_methods/tstcc_cls/scripts/part_uea_tstcc.sh",
    "content": "python main_uea.py --dataset PEMS-SF --device cuda:1 --save_csv_name tstcc_uea_0423_ --seed 42;\npython main_uea.py --dataset InsectWingbeat --device cuda:1 --save_csv_name tstcc_uea_0423_ --seed 42;\npython main_uea.py --dataset FaceDetection --device cuda:1 --save_csv_name tstcc_uea_0423_ --seed 42;\npython main_uea.py --dataset EigenWorms --device cuda:1 --save_csv_name tstcc_uea_0423_ --seed 42;"
  },
  {
    "path": "ts_classification_methods/tstcc_cls/trainer/trainer.py",
    "content": "import os\nimport sys\n\nsys.path.append(\"..\")\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom models.loss import NTXentLoss\n\n\ndef Trainer(model, temporal_contr_model, model_optimizer, temp_cont_optimizer, train_dl, valid_dl, test_dl,\n            device, logger, config, experiment_log_dir, training_mode):\n    # Start training\n    logger.debug(\"Training started ....\")\n\n    criterion = nn.CrossEntropyLoss()\n    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(model_optimizer, 'min')\n\n    for epoch in range(1, config.num_epoch + 1):\n        # Train and validate\n        train_loss, train_acc = model_train(model, temporal_contr_model, model_optimizer, temp_cont_optimizer, criterion, train_dl, config, device, training_mode)\n        valid_loss, valid_acc, _, _ = model_evaluate(model, temporal_contr_model, valid_dl, device, training_mode)\n        if training_mode != 'self_supervised':  # use scheduler in all other modes.\n            scheduler.step(valid_loss)\n\n        logger.debug(f'\\nEpoch : {epoch}\\n'\n                     f'Train Loss     : {train_loss:.4f}\\t | \\tTrain Accuracy     : {train_acc:2.4f}\\n'\n                     f'Valid Loss     : {valid_loss:.4f}\\t | \\tValid Accuracy     : {valid_acc:2.4f}')\n\n    os.makedirs(os.path.join(experiment_log_dir, \"saved_models\"), exist_ok=True)\n    chkpoint = {'model_state_dict': model.state_dict(), 'temporal_contr_model_state_dict': temporal_contr_model.state_dict()}\n    torch.save(chkpoint, os.path.join(experiment_log_dir, \"saved_models\", f'ckp_last.pt'))\n\n    if training_mode != \"self_supervised\":  # no need to run the evaluation for self-supervised mode.\n        # evaluate on the test set\n        logger.debug('\\nEvaluate on the Test set:')\n        test_loss, test_acc, _, _ = model_evaluate(model, temporal_contr_model, test_dl, device, training_mode)\n        logger.debug(f'Test loss      :{test_loss:0.4f}\\t 
| Test Accuracy      : {test_acc:0.4f}')\n\n    logger.debug(\"\\n################## Training is Done! #########################\")\n\n\ndef Trainer_cls(model, temporal_contr_model, model_optimizer, temp_cont_optimizer, train_dl, valid_dl, test_dl, device, logger, config, experiment_log_dir, training_mode):\n    # Start training\n    logger.debug(\"Training started ....\")\n\n    criterion = nn.CrossEntropyLoss()\n    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(model_optimizer, 'min')\n\n    max_val_acc = 0\n    end_train_acc = 0\n    test_acc = 0\n    test_loss = 0\n    max_epoch = 0\n    for epoch in range(1, config.num_epoch + 1):\n        # Train and validate\n        train_loss, train_acc = model_train(model, temporal_contr_model, model_optimizer, temp_cont_optimizer, criterion, train_dl, config, device, training_mode)\n        valid_loss, valid_acc, _, _ = model_evaluate(model, temporal_contr_model, valid_dl, device, training_mode)\n        if valid_acc > max_val_acc:\n            max_epoch = epoch\n            max_val_acc = valid_acc\n            end_train_acc = train_acc\n            test_loss, test_acc, _, _ = model_evaluate(model, temporal_contr_model, test_dl, device, training_mode)\n        if training_mode != 'self_supervised':  # use scheduler in all other modes.\n            scheduler.step(valid_loss)\n        if epoch % 100 == 0:\n            logger.debug(f'\\nEpoch : {epoch}\\n'\n                         f'Train Loss     : {train_loss:.4f}\\t | \\tTrain Accuracy     : {train_acc:2.4f}\\n'\n                         f'Valid Loss     : {valid_loss:.4f}\\t | \\tValid Accuracy     : {valid_acc:2.4f}')\n\n    # os.makedirs(os.path.join(experiment_log_dir, \"saved_models\"), exist_ok=True)\n    # chkpoint = {'model_state_dict': model.state_dict(), 'temporal_contr_model_state_dict': temporal_contr_model.state_dict()}\n    # torch.save(chkpoint, os.path.join(experiment_log_dir, \"saved_models\", f'ckp_last.pt'))\n\n    if training_mode != 
\"self_supervised\":  # no need to run the evaluation for self-supervised mode.\n        # evaluate on the test set\n        logger.debug('\\nEvaluate on the Test set:')\n        # test_loss, test_acc, _, _ = model_evaluate(model, temporal_contr_model, test_dl, device, training_mode)\n        logger.debug(f'Test loss      :{test_loss:0.4f}\\t | Test Accuracy      : {test_acc:0.4f}\\t | max_epoch  = {max_epoch}')\n\n    logger.debug(\"\\n################## Training is Done! #########################\")\n\n    return end_train_acc, max_val_acc, test_acc\n\n\ndef model_train(model, temporal_contr_model, model_optimizer, temp_cont_optimizer, criterion, train_loader, config, device, training_mode):\n    total_loss = []\n    total_acc = []\n    model.train()\n    temporal_contr_model.train()\n\n    for batch_idx, (data, labels, aug1, aug2) in enumerate(train_loader):\n        # send to device\n        data, labels = data.float().to(device), labels.long().to(device)\n        aug1, aug2 = aug1.float().to(device), aug2.float().to(device)\n\n        # optimizer\n        model_optimizer.zero_grad()\n        temp_cont_optimizer.zero_grad()\n\n        if training_mode == \"self_supervised\":\n            predictions1, features1 = model(aug1)\n            predictions2, features2 = model(aug2)\n\n            # normalize projection feature vectors\n            features1 = F.normalize(features1, dim=1)\n            features2 = F.normalize(features2, dim=1)\n\n            temp_cont_loss1, temp_cont_lstm_feat1 = temporal_contr_model(features1, features2)\n            temp_cont_loss2, temp_cont_lstm_feat2 = temporal_contr_model(features2, features1)\n\n            # normalize projection feature vectors\n            zis = temp_cont_lstm_feat1 \n            zjs = temp_cont_lstm_feat2 \n\n        else:\n            output = model(data)\n\n        # compute loss\n        if training_mode == \"self_supervised\":\n            lambda1 = 1\n            lambda2 = 0.7\n            
nt_xent_criterion = NTXentLoss(device, config.batch_size, config.Context_Cont.temperature,\n                                           config.Context_Cont.use_cosine_similarity)\n            loss = (temp_cont_loss1 + temp_cont_loss2) * lambda1 +  nt_xent_criterion(zis, zjs) * lambda2\n            \n        else: # supervised training or fine tuining\n            predictions, features = output\n            loss = criterion(predictions, labels)\n            total_acc.append(labels.eq(predictions.detach().argmax(dim=1)).float().mean())\n\n        total_loss.append(loss.item())\n        loss.backward()\n        model_optimizer.step()\n        temp_cont_optimizer.step()\n\n    total_loss = torch.tensor(total_loss).mean()\n\n    if training_mode == \"self_supervised\":\n        total_acc = 0\n    else:\n        total_acc = torch.tensor(total_acc).mean()\n    return total_loss, total_acc\n\n\ndef model_evaluate(model, temporal_contr_model, test_dl, device, training_mode):\n    model.eval()\n    temporal_contr_model.eval()\n\n    total_loss = []\n    total_acc = []\n\n    criterion = nn.CrossEntropyLoss()\n    outs = np.array([])\n    trgs = np.array([])\n\n    with torch.no_grad():\n        for data, labels, _, _ in test_dl:\n            data, labels = data.float().to(device), labels.long().to(device)\n\n            if training_mode == \"self_supervised\":\n                pass\n            else:\n                output = model(data)\n\n            # compute loss\n            if training_mode != \"self_supervised\":\n                predictions, features = output\n                loss = criterion(predictions, labels)\n                total_acc.append(labels.eq(predictions.detach().argmax(dim=1)).float().mean())\n                total_loss.append(loss.item())\n\n            if training_mode != \"self_supervised\":\n                pred = predictions.max(1, keepdim=True)[1]  # get the index of the max log-probability\n                outs = np.append(outs, 
pred.cpu().numpy())\n                trgs = np.append(trgs, labels.data.cpu().numpy())\n\n    if training_mode != \"self_supervised\":\n        total_loss = torch.tensor(total_loss).mean()  # average loss\n    else:\n        total_loss = 0\n    if training_mode == \"self_supervised\":\n        total_acc = 0\n        return total_loss, total_acc, [], []\n    else:\n        total_acc = torch.tensor(total_acc).mean()  # average acc\n    return total_loss, total_acc, outs, trgs\n"
  },
  {
    "path": "ts_classification_methods/tstcc_cls/utils.py",
    "content": "import logging\nimport os\nimport random\nimport sys\nfrom shutil import copy\n\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom sklearn.metrics import classification_report, cohen_kappa_score, confusion_matrix, accuracy_score\n\nfrom tstcc_cls.dataloader.dataloader import Load_Dataset\n\n\ndef generator_ucr_config(data, label, configs):\n    X = np.reshape(data, (data.shape[0], -1))\n    Y = label\n    num_class = np.unique(Y).shape[0]\n    series_len = X.shape[1]\n    for i in range(3):\n        if series_len % 2 == 1:\n            series_len = series_len + 3\n            series_len = series_len // 2\n        else:\n            series_len = series_len // 2 + 1\n\n    configs.features_len = series_len\n    configs.num_classes = num_class\n\n    while X.shape[0] < configs.batch_size:\n        configs.batch_size = configs.batch_size // 2\n    # print(\"num_class = \", num_class, \", features_len = \", features_len)\n\n\ndef generator_ucr(data, label, configs, training_mode, drop_last=True):\n    # print(\"Raw data shape = \", data.shape)\n    data = np.reshape(data, (data.shape[0], -1))\n    # print(\"New data shape = \", data.shape)\n    data_dict = dict()\n    data_dict[\"samples\"] = torch.from_numpy(data).unsqueeze(1)\n    # print(\"samples data shape = \", data_dict[\"samples\"].shape)\n    data_dict[\"labels\"] = torch.from_numpy(label)\n\n    tr_dataset = Load_Dataset(data_dict, configs, training_mode)\n\n    tr_loader = torch.utils.data.DataLoader(dataset=tr_dataset, batch_size=configs.batch_size,\n                                            shuffle=True, drop_last=drop_last,\n                                            num_workers=0)\n\n    return tr_loader\n\n\ndef generator_uea_config(data, label, configs):\n    Y = label\n    num_class = np.unique(Y).shape[0]\n    series_len = data.shape[1]\n    for i in range(3):\n        if series_len % 2 == 1:\n            series_len = series_len + 3\n            series_len = series_len // 
2\n        else:\n            series_len = series_len // 2 + 1\n\n    configs.features_len = series_len\n    configs.num_classes = num_class\n    configs.input_channels = data.shape[2]\n\n    while data.shape[0] < configs.batch_size:\n        configs.batch_size = configs.batch_size // 2\n\n\ndef generator_uea(data, label, configs, training_mode, drop_last=True):\n    data_dict = dict()\n    # print(\"shape = \", data.shape)\n    data_dict[\"samples\"] = torch.from_numpy(data)\n    data_dict[\"labels\"] = torch.from_numpy(label)\n\n    tr_dataset = Load_Dataset(data_dict, configs, training_mode)\n\n    tr_loader = torch.utils.data.DataLoader(dataset=tr_dataset, batch_size=configs.batch_size,\n                                            shuffle=True, drop_last=drop_last,\n                                            num_workers=0)\n\n    return tr_loader\n\n\ndef set_requires_grad(model, dict_, requires_grad=True):\n    for param in model.named_parameters():\n        if param[0] in dict_:\n            param[1].requires_grad = requires_grad\n\n\ndef fix_randomness(SEED):\n    random.seed(SEED)\n    np.random.seed(SEED)\n    torch.manual_seed(SEED)\n    torch.cuda.manual_seed(SEED)\n    torch.backends.cudnn.deterministic = True\n\n\ndef epoch_time(start_time, end_time):\n    elapsed_time = end_time - start_time\n    elapsed_mins = int(elapsed_time / 60)\n    elapsed_secs = int(elapsed_time - (elapsed_mins * 60))\n    return elapsed_mins, elapsed_secs\n\n\ndef _calc_metrics(pred_labels, true_labels, log_dir, home_path):\n    pred_labels = np.array(pred_labels).astype(int)\n    true_labels = np.array(true_labels).astype(int)\n\n    # save targets\n    labels_save_path = os.path.join(log_dir, \"labels\")\n    os.makedirs(labels_save_path, exist_ok=True)\n    np.save(os.path.join(labels_save_path, \"predicted_labels.npy\"), pred_labels)\n    np.save(os.path.join(labels_save_path, \"true_labels.npy\"), true_labels)\n\n    r = classification_report(true_labels, pred_labels, 
digits=6, output_dict=True)\n    cm = confusion_matrix(true_labels, pred_labels)\n    df = pd.DataFrame(r)\n    df[\"cohen\"] = cohen_kappa_score(true_labels, pred_labels)\n    df[\"accuracy\"] = accuracy_score(true_labels, pred_labels)\n    df = df * 100\n\n    # save classification report\n    exp_name = os.path.split(os.path.dirname(log_dir))[-1]\n    training_mode = os.path.basename(log_dir)\n    file_name = f\"{exp_name}_{training_mode}_classification_report.xlsx\"\n    report_Save_path = os.path.join(home_path, log_dir, file_name)\n    df.to_excel(report_Save_path)\n\n    # save confusion matrix\n    cm_file_name = f\"{exp_name}_{training_mode}_confusion_matrix.torch\"\n    cm_Save_path = os.path.join(home_path, log_dir, cm_file_name)\n    torch.save(cm, cm_Save_path)\n\n\ndef _logger(logger_name, level=logging.DEBUG):\n    \"\"\"\n    Method to return a custom logger with the given name and level\n    \"\"\"\n    logger = logging.getLogger(logger_name)\n    logger.setLevel(level)\n    # format_string = (\"%(asctime)s — %(name)s — %(levelname)s — %(funcName)s:\"\n    #                 \"%(lineno)d — %(message)s\")\n    format_string = \"%(message)s\"\n    log_format = logging.Formatter(format_string)\n    # Creating and adding the console handler\n    console_handler = logging.StreamHandler(sys.stdout)\n    console_handler.setFormatter(log_format)\n    logger.addHandler(console_handler)\n    # Creating and adding the file handler\n    file_handler = logging.FileHandler(logger_name, mode='a')\n    file_handler.setFormatter(log_format)\n    logger.addHandler(file_handler)\n    return logger\n\n\ndef copy_Files(destination, data_type):\n    destination_dir = os.path.join(destination, \"model_files\")\n    os.makedirs(destination_dir, exist_ok=True)\n    copy(\"main.py\", os.path.join(destination_dir, \"main.py\"))\n    copy(\"trainer/trainer.py\", os.path.join(destination_dir, \"trainer.py\"))\n    copy(f\"config_files/{data_type}_Configs.py\", 
os.path.join(destination_dir, f\"{data_type}_Configs.py\"))\n    copy(\"dataloader/augmentations.py\", os.path.join(destination_dir, \"augmentations.py\"))\n    copy(\"dataloader/dataloader.py\", os.path.join(destination_dir, \"dataloader.py\"))\n    copy(f\"models/model.py\", os.path.join(destination_dir, f\"model.py\"))\n    copy(\"models/loss.py\", os.path.join(destination_dir, \"loss.py\"))\n    copy(\"models/TC.py\", os.path.join(destination_dir, \"TC.py\"))\n"
  },
  {
    "path": "ts_classification_methods/visualize.py",
    "content": "import argparse\nimport os\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nimport torch.nn\nfrom scipy.interpolate import interp1d\n\nfrom data import normalize_per_series\nfrom model import FCN, DilatedConvolutionVis, Classifier\nfrom tsm_utils import load_data, transfer_labels, set_seed\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = '0'\nDEVICE = torch.device('cuda:0' if torch.cuda.is_available() else \"cpu\")\n\n\ndef heatmap(xs, ys, dataset_name='MixedShapesSmallTrain', num_class=5, cls=4):\n    model = FCN(num_class)\n    model.to(DEVICE)\n\n    ts1 = plt.subplot2grid((2, 15), loc=(0, 0), colspan=4, rowspan=1)\n    hm1 = plt.subplot2grid((2, 15), loc=(1, 0), colspan=4)\n    ts2 = plt.subplot2grid((2, 15), loc=(0, 5), colspan=4, rowspan=1)\n    hm2 = plt.subplot2grid((2, 15), loc=(1, 5), colspan=4)\n    ts3 = plt.subplot2grid((2, 15), loc=(0, 10), colspan=4, rowspan=1)\n    hm3 = plt.subplot2grid((2, 15), loc=(1, 10), colspan=4)\n\n    x0s = xs[np.where(ys == cls)]\n    x0_mean = np.mean(x0s, axis=1)\n    x0_mean_mean = np.mean(x0_mean, axis=0)\n    class0 = x0s[np.where(np.abs(x0_mean - x0_mean_mean) == min(np.abs(x0_mean - x0_mean_mean)))[0][0]]\n    x1 = class0\n    x_copy = x1\n    # direct cls\n    model.load_state_dict(\n        torch.load('./visuals/' + dataset_name + '/direct_fcn_linear_encoder_weights.pt', map_location='cuda:0'))\n    model.eval()\n    ts1.set_title('Direct classification')\n    ts1.plot(range(x_copy.shape[0]), x_copy)\n    x1 = torch.from_numpy(x1).to(DEVICE)\n    x1 = torch.unsqueeze(x1, 0)\n    x1 = torch.unsqueeze(x1, 0)\n    gaps, feature = model(x1, vis=True)\n    gaps = torch.squeeze(gaps)\n    feature = torch.squeeze(feature)\n    feature = feature[torch.topk((gaps - gaps.mean()) ** 2, k=16).indices, :].cpu()\n    hm1.pcolormesh(feature[0:16], shading='nearest')\n\n    # supervised transfer\n    # model.load_state_dict(torch.load('./visuals/' + dataset_name + 
'/fcn_nonlinear_encoder_finetune_weights_UWaveGestureLibraryZ.pt',map_location='cuda:0'))\n    model.load_state_dict(\n        torch.load('./visuals/' + dataset_name + '/fcn_linear_encoder_finetune_weights_UWaveGestureLibraryZ.pt',\n                   map_location='cuda:0'))\n    model.eval()\n    ts2.set_title('Positive transfer')\n    ts2.plot(range(x_copy.shape[0]), x_copy)\n    gaps, feature = model(x1, vis=True)\n    gaps = torch.squeeze(gaps)\n    feature = torch.squeeze(feature)\n    feature = feature[torch.topk((gaps - gaps.mean()) ** 2, k=16).indices, :].cpu()\n    hm2.pcolormesh(feature[0:16], shading='nearest')\n\n    # model.load_state_dict(torch.load('./visuals/' + dataset_name + '/fcn_nonlinear_encoder_finetune_weights_ElectricDevices.pt',map_location='cuda:0'))\n    model.load_state_dict(\n        torch.load('./visuals/' + dataset_name + '/fcn_linear_encoder_finetune_weights_Crop.pt',\n                   map_location='cuda:0'))\n    model.eval()\n    ts3.set_title('Negative transfer')\n    ts3.plot(range(x_copy.shape[0]), x_copy)\n    gaps, feature = model(x1, vis=True)\n    gaps = torch.squeeze(gaps)\n    feature = torch.squeeze(feature)\n    feature = feature[torch.topk((gaps - gaps.mean()) ** 2, k=16).indices, :].cpu()\n    hm3.pcolormesh(feature[0:16], shading='nearest')\n\n    plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.15, hspace=0.30)\n    plt.tight_layout()\n    plt.savefig('./visuals/' + dataset_name + '_postive_negative.png')\n    plt.savefig('./visuals/' + dataset_name + '_postive_negative.pdf')\n\n\ndef multi_cam(xs, ys):\n    # sampling\n    x0s = xs[np.where(ys == 0)]\n    x1s = xs[np.where(ys == 1)]\n\n    x0_mean = np.mean(x0s, axis=1)\n    x0_mean_mean = np.mean(x0_mean, axis=0)\n    class0 = x0s[np.where(np.abs(x0_mean - x0_mean_mean) == min(np.abs(x0_mean - x0_mean_mean)))]\n    # class0 = np.expand_dims(class0, 0)\n    print(class0.shape)\n\n    x1_mean = np.mean(x1s, axis=1)\n    x1_mean_mean = 
np.mean(x1_mean, axis=0)\n    class1 = x1s[np.where(np.abs(x1_mean - x1_mean_mean) == min(np.abs(x1_mean - x1_mean_mean)))][0]\n    class1 = np.expand_dims(class1, 0)\n    print(class1.shape)\n\n    # print(class0.mean())\n    # print(class1.mean())\n    def cam(x, label):\n        x = torch.from_numpy(x).to(DEVICE)\n        # x = torch.unsqueeze(x, 0)\n        x = torch.unsqueeze(x, 0)\n        features, vis_out = model(x, vis=True)\n        pred = classifier(features)\n\n        w_k_c = classifier.state_dict()['dense.weight']\n        cas = np.zeros(dtype=np.float16, shape=(vis_out.shape[2]))\n        for k, w in enumerate(w_k_c[label, :]):\n            cas += (w * vis_out[0, k, :]).cpu().numpy()\n\n        minimum = np.min(cas)\n        # print(cas)\n        cas = cas - minimum\n        cas = cas / max(cas)\n        cas = cas * 100\n\n        x = x.cpu().numpy()\n        plt_x = np.linspace(0, x.shape[2] - 1, 2000, endpoint=True)\n\n        f = interp1d(range(x.shape[2]), x.squeeze())\n        y = f(plt_x)\n\n        f = interp1d(range(x.shape[2]), cas)\n        cas = f(plt_x).astype(int)\n\n        plt.scatter(x=plt_x, y=y, c=cas, cmap='jet', marker='.', s=2, vmin=0, vmax=100, linewidths=1.0)\n\n        plt.yticks([-1.0, 0.0, 1.0])\n\n    plt.figure()\n    model = FCN(2).to(DEVICE)\n    classifier = Classifier(128, 2).to(DEVICE)\n    model.load_state_dict(torch.load('./visuals/GunPoint/direct_fcn_encoder.pt', map_location='cuda:0'))\n    classifier.load_state_dict(torch.load('./visuals/GunPoint/direct_fcn_classifier.pt', map_location='cuda:0'))\n    model.eval()\n    classifier.eval()\n    x1 = torch.from_numpy(xs).to(DEVICE)\n    x1 = torch.unsqueeze(x1, 1)\n    features, _ = model(x1, vis=True)\n    val_pred = features\n    val_pred = classifier(val_pred)\n    ys1 = torch.from_numpy(ys).to(DEVICE)\n    val_accu = torch.sum(torch.argmax(val_pred.data, axis=1) == ys1)\n    val_accu = val_accu / len(ys)\n    print(\"val accuracy direct = \", val_accu)\n\n    ax1 
= plt.subplot(4, 1, 1)\n    plt.title('Direct classification via FCN (100%)')\n    cam(class0, 0)\n    cam(class1, 1)\n\n    model = DilatedConvolutionVis(in_channels=1, embedding_channels=40, out_channels=320, depth=3,\n                                  reduced_size=320, kernel_size=3, num_classes=2).to(DEVICE)\n    classifier = Classifier(320, 2).to(DEVICE)\n    model.load_state_dict(\n        torch.load('./visuals/GunPoint/direct_dilated_encoder.pt', map_location='cuda:0'))\n    classifier.load_state_dict(\n        torch.load('./visuals/GunPoint/direct_dilated_classifier.pt', map_location='cuda:0'))\n    model.eval()\n    classifier.eval()\n    features, _ = model(x1, vis=True)\n    val_pred = features\n    val_pred = classifier(val_pred)\n    ys1 = torch.from_numpy(ys).to(DEVICE)\n    val_accu = torch.sum(torch.argmax(val_pred.data, axis=1) == ys1)\n    val_accu = val_accu / len(ys)\n    print(\"val accuracy dilated = \", val_accu)\n\n    ax2 = plt.subplot(4, 1, 2)\n    plt.title('Direct classification via TCN (50%)')\n    cam(class0, 0)\n    cam(class1, 1)\n\n    model = FCN(2).to(DEVICE)\n    classifier = Classifier(128, 2).to(DEVICE)\n    model.load_state_dict(\n        torch.load('./visuals/GunPoint/supervised_encoder_UWaveGestureLibraryX_linear.pt', map_location='cuda:0'))\n    classifier.load_state_dict(\n        torch.load('./visuals/GunPoint/supervised_classifier_UWaveGestureLibraryX_linear.pt', map_location='cuda:0'))\n    model.eval()\n    classifier.eval()\n    features, _ = model(x1, vis=True)\n    val_pred = features\n    val_pred = classifier(val_pred)\n    ys1 = torch.from_numpy(ys).to(DEVICE)\n    val_accu = torch.sum(torch.argmax(val_pred.data, axis=1) == ys1)\n    val_accu = val_accu / len(ys)\n    print(\"val accuracy supervised = \", val_accu)\n\n    ax3 = plt.subplot(4, 1, 3)\n    plt.title('Supervised transfer via FCN (100%)')\n    cam(class0, 0)\n    cam(class1, 1)\n\n    model.load_state_dict(\n        
torch.load('./visuals/GunPoint/unsupervised_encoder_UWaveGestureLibraryX_linear.pt', map_location='cuda:0'))\n    classifier.load_state_dict(\n        torch.load('./visuals/GunPoint/unsupervised_classifier_UWaveGestureLibraryX_linear.pt', map_location='cuda:0'))\n    model.eval()\n    classifier.eval()\n    features, _ = model(x1, vis=True)\n    val_pred = features\n    val_pred = classifier(val_pred)\n    ys1 = torch.from_numpy(ys).to(DEVICE)\n    val_accu = torch.sum(torch.argmax(val_pred.data, axis=1) == ys1)\n    val_accu = val_accu / len(ys)\n    print(\"val accuracy unsupervised = \", val_accu)\n\n    ax4 = plt.subplot(4, 1, 4)\n    plt.title('Unsupervised transfer via FCN decoder (98.5%)')\n    cam(class0, 0)\n    cam(class1, 1)\n\n    plt.colorbar(ax=[ax1, ax2, ax3, ax4])  # Add a color scale bar on the right side\n    plt.subplots_adjust(left=None, bottom=None, right=0.75, top=None, wspace=0.00, hspace=0.9)\n    # plt.tight_layout()\n    plt.savefig('./visuals/fcn_dilated_supervised_unsupervised_transfer.png', bbox_inches='tight')\n    plt.savefig('./visuals/fcn_dilated_supervised_unsupervised_transfer.pdf', bbox_inches='tight')\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--dataroot', type=str, default='/SSD/lz/UCRArchive_2018',\n                        help='data root')  ## /dev_data/zzj/hzy/datasets/UCR\n    parser.add_argument('--dataset', type=str, default='GunPoint',\n                        help='dataset name')  ## Wine GunPoint FreezerSmallTrain MixedShapesSmallTrain\n    parser.add_argument('--backbone', type=str, choices=['dilated', 'fcn'], default='fcn', help='encoder backbone')\n    parser.add_argument('--graph', type=str, choices=['cam', 'heatmap', 'tsne'], default='cam')\n    parser.add_argument('--random_seed', type=int, default=42, help='shuffle seed')\n\n    args = parser.parse_args()\n    set_seed(args)\n\n    xs, ys, num_classes = load_data(args.dataroot, args.dataset)\n    xs = 
normalize_per_series(xs)\n    ys = transfer_labels(ys)\n\n    if args.graph == 'cam':\n        multi_cam(xs, ys)\n    elif args.graph == 'heatmap':\n        heatmap(xs, ys, dataset_name='Wine', num_class=2, cls=0)\n"
  },
  {
    "path": "ts_forecasting_methods/CoST/CODEOWNERS",
    "content": "# Comment line immediately above ownership line is reserved for related other information. Please be careful while editing.\n#ECCN:Open Source"
  },
  {
    "path": "ts_forecasting_methods/CoST/CODE_OF_CONDUCT.md",
    "content": "# Salesforce Open Source Community Code of Conduct\n\n## About the Code of Conduct\n\nEquality is a core value at Salesforce. We believe a diverse and inclusive\ncommunity fosters innovation and creativity, and are committed to building a\nculture where everyone feels included.\n\nSalesforce open-source projects are committed to providing a friendly, safe, and\nwelcoming environment for all, regardless of gender identity and expression,\nsexual orientation, disability, physical appearance, body size, ethnicity, nationality, \nrace, age, religion, level of experience, education, socioeconomic status, or \nother similar personal characteristics.\n\nThe goal of this code of conduct is to specify a baseline standard of behavior so\nthat people with different social values and communication styles can work\ntogether effectively, productively, and respectfully in our open source community.\nIt also establishes a mechanism for reporting issues and resolving conflicts.\n\nAll questions and reports of abusive, harassing, or otherwise unacceptable behavior\nin a Salesforce open-source project may be reported by contacting the Salesforce\nOpen Source Conduct Committee at ossconduct@salesforce.com.\n\n## Our Pledge\n\nIn the interest of fostering an open and welcoming environment, we as\ncontributors and maintainers pledge to making participation in our project and\nour community a harassment-free experience for everyone, regardless of gender \nidentity and expression, sexual orientation, disability, physical appearance, \nbody size, ethnicity, nationality, race, age, religion, level of experience, education, \nsocioeconomic status, or other similar personal characteristics.\n\n## Our Standards\n\nExamples of behavior that contributes to creating a positive environment\ninclude:\n\n* Using welcoming and inclusive language\n* Being respectful of differing viewpoints and experiences\n* Gracefully accepting constructive criticism\n* Focusing on what is best for 
the community\n* Showing empathy toward other community members\n\nExamples of unacceptable behavior by participants include:\n\n* The use of sexualized language or imagery and unwelcome sexual attention or\nadvances\n* Personal attacks, insulting/derogatory comments, or trolling\n* Public or private harassment\n* Publishing, or threatening to publish, others' private information—such as\na physical or electronic address—without explicit permission\n* Other conduct which could reasonably be considered inappropriate in a\nprofessional setting\n* Advocating for or encouraging any of the above behaviors\n\n## Our Responsibilities\n\nProject maintainers are responsible for clarifying the standards of acceptable\nbehavior and are expected to take appropriate and fair corrective action in\nresponse to any instances of unacceptable behavior.\n\nProject maintainers have the right and responsibility to remove, edit, or\nreject comments, commits, code, wiki edits, issues, and other contributions\nthat are not aligned with this Code of Conduct, or to ban temporarily or\npermanently any contributor for other behaviors that they deem inappropriate,\nthreatening, offensive, or harmful.\n\n## Scope\n\nThis Code of Conduct applies both within project spaces and in public spaces\nwhen an individual is representing the project or its community. Examples of\nrepresenting a project or community include using an official project email\naddress, posting via an official social media account, or acting as an appointed\nrepresentative at an online or offline event. Representation of a project may be\nfurther defined and clarified by project maintainers.\n\n## Enforcement\n\nInstances of abusive, harassing, or otherwise unacceptable behavior may be\nreported by contacting the Salesforce Open Source Conduct Committee \nat ossconduct@salesforce.com. All complaints will be reviewed and investigated \nand will result in a response that is deemed necessary and appropriate to the \ncircumstances. 
The committee is obligated to maintain confidentiality with \nregard to the reporter of an incident. Further details of specific enforcement \npolicies may be posted separately.\n\nProject maintainers who do not follow or enforce the Code of Conduct in good\nfaith may face temporary or permanent repercussions as determined by other\nmembers of the project's leadership and the Salesforce Open Source Conduct \nCommittee.\n\n## Attribution\n\nThis Code of Conduct is adapted from the [Contributor Covenant][contributor-covenant-home],\nversion 1.4, available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html. \nIt includes adaptions and additions from [Go Community Code of Conduct][golang-coc], \n[CNCF Code of Conduct][cncf-coc], and [Microsoft Open Source Code of Conduct][microsoft-coc].\n\nThis Code of Conduct is licensed under the [Creative Commons Attribution 3.0 License][cc-by-3-us].\n\n[contributor-covenant-home]: https://www.contributor-covenant.org (https://www.contributor-covenant.org/)\n[golang-coc]: https://golang.org/conduct\n[cncf-coc]: https://github.com/cncf/foundation/blob/master/code-of-conduct.md\n[microsoft-coc]: https://opensource.microsoft.com/codeofconduct/\n[cc-by-3-us]: https://creativecommons.org/licenses/by/3.0/us/"
  },
  {
    "path": "ts_forecasting_methods/CoST/LICENSE.txt",
    "content": "Copyright (c) 2022, Salesforce.com, Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.\n\n* Neither the name of Salesforce.com nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
  },
  {
    "path": "ts_forecasting_methods/CoST/README.md",
    "content": "# CoST: Contrastive Learning of Disentangled Seasonal-Trend Representations for Time Series Forecasting\n\n\nOfficial PyTorch code repository for the [CoST paper](https://openreview.net/forum?id=PilZY3omXV2).\n\n## Data\n\nThe datasets can be obtained and put into `datasets/` folder in the following way:\n\n* [3 ETT datasets](https://github.com/zhouhaoyi/ETDataset) should be placed at `datasets/ETTh1.csv`, `datasets/ETTh2.csv` and `datasets/ETTm1.csv`.\n* [Electricity dataset](https://archive.ics.uci.edu/ml/datasets/ElectricityLoadDiagrams20112014) placed at `datasets/LD2011_2014.txt` and run `electricity.py`.\n\n## Usage\n\nTo train and evaluate CoST on a dataset, run the following command:\n\n```train & evaluate\npython train.py <dataset_name> <run_name> --archive <archive> --batch-size <batch_size> --repr-dims <repr_dims> --gpu <gpu> --eval\n```\nThe detailed descriptions about the arguments are as following:\n| Parameter name | Description of parameter |\n| --- | --- |\n| dataset_name | The dataset name |\n| run_name | The folder name used to save model, output and evaluation metrics. This can be set to any word |\n| archive | The archive name that the dataset belongs to. This can be set to `forecast_csv` or `forecast_csv_univar` |\n| batch_size | The batch size (defaults to 8) |\n| repr_dims | The representation dimensions (defaults to 320) |\n| gpu | The gpu no. used for training and inference (defaults to 0) |\n| eval | Whether to perform evaluation after training |\n| kernels | Kernel sizes for mixture of AR experts module |\n| alpha | Weight for loss function |\n\n(For descriptions of more arguments, run `python train.py -h`.)\n\nAfter training and evaluation, the trained encoder, output and evaluation metrics can be found in `training/<DatasetName>/<RunName>_<Date>_<Time>/`. \n\n**Scripts:** The scripts for reproduction are provided in `scripts/` folder.\n\n## FAQs\n**Q**: ValueError: Found array with dim 4. 
StandardScaler expected <= 2.\n\n**A**: Please install the appropriate package requirements as found in ```requirements.txt```, in particular, ```scikit_learn==0.24.1```.\n\n"
  },
  {
    "path": "ts_forecasting_methods/CoST/SECURITY.md",
    "content": "## Security\n\nPlease report any security issue to [security@salesforce.com](mailto:security@salesforce.com)\nas soon as it is discovered. This library limits its runtime dependencies in\norder to reduce the total cost of ownership as much as can be, but all consumers\nshould remain vigilant and have their security stakeholders review all third-party\nproducts (3PP) like this one and their dependencies."
  },
  {
    "path": "ts_forecasting_methods/CoST/cost.py",
    "content": "import sys, math, random, copy\nfrom typing import Union, Callable, Optional, List\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.fft as fft\nfrom torch.utils.data import TensorDataset, DataLoader, Dataset\n\nimport numpy as np\nfrom einops import rearrange, repeat, reduce\n\nfrom models.encoder import CoSTEncoder\nfrom utils import take_per_row, split_with_nan, centerize_vary_length_series, torch_pad_nan\n\n\nclass PretrainDataset(Dataset):\n\n    def __init__(self,\n                 data,\n                 sigma,\n                 p=0.5,\n                 multiplier=10):\n        super().__init__()\n        self.data = data\n        self.p = p\n        self.sigma = sigma\n        self.multiplier = multiplier\n        self.N, self.T, self.D = data.shape # num_ts, time, dim\n\n    def __getitem__(self, item):\n        ts = self.data[item % self.N]\n        return self.transform(ts), self.transform(ts)\n\n    def __len__(self):\n        return self.data.size(0) * self.multiplier\n\n    def transform(self, x):\n        return self.jitter(self.shift(self.scale(x)))\n\n    def jitter(self, x):\n        if random.random() > self.p:\n            return x\n        return x + (torch.randn(x.shape) * self.sigma)\n\n    def scale(self, x):\n        if random.random() > self.p:\n            return x\n        return x * (torch.randn(x.size(-1)) * self.sigma + 1)\n\n    def shift(self, x):\n        if random.random() > self.p:\n            return x\n        return x + (torch.randn(x.size(-1)) * self.sigma)\n\n\nclass CoSTModel(nn.Module):\n    def __init__(self,\n                 encoder_q: nn.Module, encoder_k: nn.Module,\n                 kernels: List[int],\n                 device: Optional[str] = 'cuda',\n                 dim: Optional[int] = 128,\n                 alpha: Optional[float] = 0.05,\n                 K: Optional[int] = 65536,\n                 m: Optional[float] = 0.999,\n                 T: 
Optional[float] = 0.07):\n        super().__init__()\n\n        self.K = K\n        self.m = m\n        self.T = T\n        self.device = device\n\n        self.kernels = kernels\n\n        self.alpha = alpha\n\n        self.encoder_q = encoder_q\n        self.encoder_k = encoder_k\n\n        # create the encoders\n        self.head_q = nn.Sequential(\n            nn.Linear(dim, dim),\n            nn.ReLU(),\n            nn.Linear(dim, dim)\n        )\n        self.head_k = nn.Sequential(\n            nn.Linear(dim, dim),\n            nn.ReLU(),\n            nn.Linear(dim, dim)\n        )\n\n        for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):\n            param_k.data.copy_(param_q.data)  # initialize\n            param_k.requires_grad = False  # not update by gradient\n        for param_q, param_k in zip(self.head_q.parameters(), self.head_k.parameters()):\n            param_k.data.copy_(param_q.data)  # initialize\n            param_k.requires_grad = False  # not update by gradient\n\n        self.register_buffer('queue', F.normalize(torch.randn(dim, K), dim=0))\n        self.register_buffer('queue_ptr', torch.zeros(1, dtype=torch.long))\n\n\n    def compute_loss(self, q, k, k_negs):\n        # compute logits\n        # positive logits: Nx1\n        l_pos = torch.einsum('nc,nc->n', [q, k]).unsqueeze(-1)\n        # negative logits: NxK\n        l_neg = torch.einsum('nc,ck->nk', [q, k_negs])\n\n        # logits: Nx(1+K)\n        logits = torch.cat([l_pos, l_neg], dim=1)\n\n        # apply temperature\n        logits /= self.T\n\n        # labels: positive key indicators - first dim of each batch\n        labels = torch.zeros(logits.shape[0], dtype=torch.long).cuda()\n        loss = F.cross_entropy(logits, labels)\n\n        return loss\n\n    def convert_coeff(self, x, eps=1e-6):\n        amp = torch.sqrt((x.real + eps).pow(2) + (x.imag + eps).pow(2))\n        phase = torch.atan2(x.imag, x.real + eps)\n        return amp, 
phase\n\n    def instance_contrastive_loss(self, z1, z2):\n        B, T = z1.size(0), z1.size(1)\n        z = torch.cat([z1, z2], dim=0)  # 2B x T x C\n        z = z.transpose(0, 1)  # T x 2B x C\n        sim = torch.matmul(z, z.transpose(1, 2))  # T x 2B x 2B\n        logits = torch.tril(sim, diagonal=-1)[:, :, :-1]  # T x 2B x (2B-1)\n        logits += torch.triu(sim, diagonal=1)[:, :, 1:]\n        logits = -F.log_softmax(logits, dim=-1)\n\n        i = torch.arange(B, device=z1.device)\n        loss = (logits[:, i, B + i - 1].mean() + logits[:, B + i, i].mean()) / 2\n        return loss\n\n    def forward(self, x_q, x_k):\n        # compute query features\n        rand_idx = np.random.randint(0, x_q.shape[1])\n\n        q_t, q_s = self.encoder_q(x_q)\n        if q_t is not None:\n            q_t = F.normalize(self.head_q(q_t[:, rand_idx]), dim=-1)\n\n        # compute key features\n        with torch.no_grad():  # no gradient for keys\n            self._momentum_update_key_encoder()  # update key encoder\n            k_t, k_s = self.encoder_k(x_k)\n            if k_t is not None:\n                k_t = F.normalize(self.head_k(k_t[:, rand_idx]), dim=-1)\n\n        loss = 0\n\n        loss += self.compute_loss(q_t, k_t, self.queue.clone().detach())\n        self._dequeue_and_enqueue(k_t)\n\n        q_s = F.normalize(q_s, dim=-1)\n        _, k_s = self.encoder_q(x_k)\n        k_s = F.normalize(k_s, dim=-1)\n\n        q_s_freq = fft.rfft(q_s, dim=1)\n        k_s_freq = fft.rfft(k_s, dim=1)\n        q_s_amp, q_s_phase = self.convert_coeff(q_s_freq)\n        k_s_amp, k_s_phase = self.convert_coeff(k_s_freq)\n\n        seasonal_loss = self.instance_contrastive_loss(q_s_amp, k_s_amp) + \\\n                        self.instance_contrastive_loss(q_s_phase,k_s_phase)\n        loss += (self.alpha * (seasonal_loss/2))\n\n        return loss\n\n    @torch.no_grad()\n    def _momentum_update_key_encoder(self):\n        \"\"\"\n        Momentum update for key encoder\n        
\"\"\"\n        for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):\n            param_k.data = param_k.data * self.m + param_q.data * (1 - self.m)\n        for param_q, param_k in zip(self.head_q.parameters(), self.head_k.parameters()):\n            param_k.data = param_k.data * self.m + param_q.data * (1 - self.m)\n\n    @torch.no_grad()\n    def _dequeue_and_enqueue(self, keys):\n        batch_size = keys.shape[0]\n\n        ptr = int(self.queue_ptr)\n        assert self.K % batch_size == 0\n\n        # replace keys at ptr (dequeue and enqueue)\n        self.queue[:, ptr:ptr + batch_size] = keys.T\n\n        ptr = (ptr + batch_size) % self.K\n        self.queue_ptr[0] = ptr\n\n\nclass CoST:\n    def __init__(self,\n                 input_dims: int,\n                 kernels: List[int],\n                 alpha: bool,\n                 max_train_length: int,\n                 output_dims: int = 320,\n                 hidden_dims: int = 64,\n                 depth: int = 10,\n                 device: 'str' ='cuda',\n                 lr: float = 0.001,\n                 batch_size: int = 16,\n                 after_iter_callback: Union[Callable, None] = None,\n                 after_epoch_callback: Union[Callable, None] = None):\n\n        super().__init__()\n        self.input_dims = input_dims\n        self.output_dims = output_dims\n        self.hidden_dims = hidden_dims\n        self.device = device\n        self.lr = lr\n        self.batch_size = batch_size\n        self.max_train_length = max_train_length\n\n        if kernels is None:\n            kernels = []\n\n        self.net = CoSTEncoder(\n            input_dims=input_dims, output_dims=output_dims,\n            kernels=kernels,\n            length=max_train_length,\n            hidden_dims=hidden_dims, depth=depth,\n        ).to(self.device)\n\n        self.cost = CoSTModel(\n            self.net,\n            copy.deepcopy(self.net),\n            kernels=kernels,\n    
        dim=self.net.component_dims,\n            alpha=alpha,\n            K=256,\n            device=self.device,\n        ).to(self.device)\n\n        self.after_iter_callback = after_iter_callback\n        self.after_epoch_callback = after_epoch_callback\n        \n        self.n_epochs = 0\n        self.n_iters = 0\n\n    def fit(self, train_data, n_epochs=None, n_iters=None, verbose=False):\n        assert train_data.ndim == 3\n\n        if n_iters is None and n_epochs is None:\n            n_iters = 200 if train_data.size <= 100000 else 600\n\n        if self.max_train_length is not None:\n            sections = train_data.shape[1] // self.max_train_length\n            if sections >= 2:\n                train_data = np.concatenate(split_with_nan(train_data, sections, axis=1), axis=0)\n\n        temporal_missing = np.isnan(train_data).all(axis=-1).any(axis=0)\n        if temporal_missing[0] or temporal_missing[-1]:\n            train_data = centerize_vary_length_series(train_data)\n                \n        train_data = train_data[~np.isnan(train_data).all(axis=2).all(axis=1)]\n\n        multiplier = 1 if train_data.shape[0] >= self.batch_size else math.ceil(self.batch_size / train_data.shape[0])\n        train_dataset = PretrainDataset(torch.from_numpy(train_data).to(torch.float), sigma=0.5, multiplier=multiplier)\n        train_loader = DataLoader(train_dataset, batch_size=min(self.batch_size, len(train_dataset)), shuffle=True, drop_last=True)\n\n        optimizer = torch.optim.SGD([p for p in self.cost.parameters() if p.requires_grad],\n                                    lr=self.lr,\n                                    momentum=0.9,\n                                    weight_decay=1e-4)\n        \n        loss_log = []\n        \n        while True:\n            if n_epochs is not None and self.n_epochs >= n_epochs:\n                break\n            \n            cum_loss = 0\n            n_epoch_iters = 0\n            \n            interrupted = 
False\n            for batch in train_loader:\n                if n_iters is not None and self.n_iters >= n_iters:\n                    interrupted = True\n                    break\n\n                x_q, x_k = map(lambda x: x.to(self.device), batch)\n                if self.max_train_length is not None and x_q.size(1) > self.max_train_length:\n                    window_offset = np.random.randint(x_q.size(1) - self.max_train_length + 1)\n                    x_q = x_q[:, window_offset : window_offset + self.max_train_length]\n                if self.max_train_length is not None and x_k.size(1) > self.max_train_length:\n                    window_offset = np.random.randint(x_k.size(1) - self.max_train_length + 1)\n                    x_k = x_k[:, window_offset : window_offset + self.max_train_length]\n\n                optimizer.zero_grad()\n\n                loss = self.cost(x_q, x_k)\n\n                loss.backward()\n                optimizer.step()\n\n                cum_loss += loss.item()\n                n_epoch_iters += 1\n                \n                self.n_iters += 1\n                \n                if self.after_iter_callback is not None:\n                    self.after_iter_callback(self, loss.item())\n\n                if n_iters is not None:\n                    adjust_learning_rate(optimizer, self.lr, self.n_iters, n_iters)\n            \n            if interrupted:\n                break\n            \n            cum_loss /= n_epoch_iters\n            loss_log.append(cum_loss)\n            if verbose:\n                print(f\"Epoch #{self.n_epochs}: loss={cum_loss}\")\n            self.n_epochs += 1\n\n            if self.after_epoch_callback is not None:\n                self.after_epoch_callback(self, cum_loss)\n\n            if n_epochs is not None:\n                adjust_learning_rate(optimizer, self.lr, self.n_epochs, n_epochs)\n            \n        return loss_log\n    \n    def _eval_with_pooling(self, x, mask=None, slicing=None, 
encoding_window=None):\n        out_t, out_s = self.net(x.to(self.device, non_blocking=True))  # l b t d\n        out = torch.cat([out_t[:, -1], out_s[:, -1]], dim=-1)\n        return rearrange(out.cpu(), 'b d -> b () d')\n    \n    def encode(self, data, mode, mask=None, encoding_window=None, casual=False, sliding_length=None, sliding_padding=0, batch_size=None):\n        if mode == 'forecasting':\n            encoding_window = None\n            slicing = None\n        else:\n            raise NotImplementedError(f\"mode {mode} has not been implemented\")\n\n        assert data.ndim == 3\n        if batch_size is None:\n            batch_size = self.batch_size\n        n_samples, ts_l, _ = data.shape\n\n        org_training = self.net.training\n        self.net.eval()\n        \n        dataset = TensorDataset(torch.from_numpy(data).to(torch.float))\n        loader = DataLoader(dataset, batch_size=batch_size)\n        \n        with torch.no_grad():\n            output = []\n            for batch in loader:\n                x = batch[0]\n                if sliding_length is not None:\n                    reprs = []\n                    if n_samples < batch_size:\n                        calc_buffer = []\n                        calc_buffer_l = 0\n                    for i in range(0, ts_l, sliding_length):\n                        l = i - sliding_padding\n                        r = i + sliding_length + (sliding_padding if not casual else 0)\n                        x_sliding = torch_pad_nan(\n                            x[:, max(l, 0) : min(r, ts_l)],\n                            left=-l if l<0 else 0,\n                            right=r-ts_l if r>ts_l else 0,\n                            dim=1\n                        )\n                        if n_samples < batch_size:\n                            if calc_buffer_l + n_samples > batch_size:\n                                out = self._eval_with_pooling(\n                                    
torch.cat(calc_buffer, dim=0),\n                                    mask,\n                                    slicing=slicing,\n                                    encoding_window=encoding_window\n                                )\n                                reprs += torch.split(out, n_samples)\n                                calc_buffer = []\n                                calc_buffer_l = 0\n                            calc_buffer.append(x_sliding)\n                            calc_buffer_l += n_samples\n                        else:\n                            out = self._eval_with_pooling(\n                                x_sliding,\n                                mask,\n                                slicing=slicing,\n                                encoding_window=encoding_window\n                            )\n                            reprs.append(out)\n\n                    if n_samples < batch_size:\n                        if calc_buffer_l > 0:\n                            out = self._eval_with_pooling(\n                                torch.cat(calc_buffer, dim=0),\n                                mask,\n                                slicing=slicing,\n                                encoding_window=encoding_window\n                            )\n                            reprs += torch.split(out, n_samples)\n                            calc_buffer = []\n                            calc_buffer_l = 0\n                    \n                    out = torch.cat(reprs, dim=1)\n                    if encoding_window == 'full_series':\n                        out = F.max_pool1d(\n                            out.transpose(1, 2).contiguous(),\n                            kernel_size = out.size(1),\n                        ).squeeze(1)\n                else:\n                    out = self._eval_with_pooling(x, mask, encoding_window=encoding_window)\n                    if encoding_window == 'full_series':\n                        out = 
out.squeeze(1)\n                        \n                output.append(out)\n                \n            output = torch.cat(output, dim=0)\n\n        self.net.train(org_training)\n        return output.numpy()\n    \n    def save(self, fn):\n        ''' Save the model to a file.\n        \n        Args:\n            fn (str): filename.\n        '''\n        torch.save(self.net.state_dict(), fn)\n    \n    def load(self, fn):\n        ''' Load the model from a file.\n        \n        Args:\n            fn (str): filename.\n        '''\n        state_dict = torch.load(fn, map_location=self.device)\n        self.net.load_state_dict(state_dict)\n\n\ndef adjust_learning_rate(optimizer, lr, epoch, epochs):\n    \"\"\"Decay the learning rate based on schedule\"\"\"\n    lr *= 0.5 * (1. + math.cos(math.pi * epoch / epochs))\n    for param_group in optimizer.param_groups:\n        param_group['lr'] = lr\n"
  },
  {
    "path": "ts_forecasting_methods/CoST/datasets/PLACE_DATASETS_HERE",
    "content": "Please follow the instructions in README.md to place the datasets into this folder."
  },
  {
    "path": "ts_forecasting_methods/CoST/datasets/electricity.py",
    "content": "import pandas as pd\ndata_ecl = pd.read_csv('LD2011_2014.txt', parse_dates=True, sep=';', decimal=',', index_col=0)\ndata_ecl = data_ecl.resample('1h', closed='right').sum()\ndata_ecl = data_ecl.loc[:, data_ecl.cumsum(axis=0).iloc[8920] != 0]  # filter out instances with missing values\ndata_ecl.index = data_ecl.index.rename('date')\ndata_ecl = data_ecl['2012':]\ndata_ecl.to_csv('electricity.csv')"
  },
  {
    "path": "ts_forecasting_methods/CoST/datasets/m5.py",
    "content": "import pandas as pd\nimport numpy as np\n\ncalendar = pd.read_csv('calendar.csv', index_col='date', parse_dates=True)\ntrain_validation = pd.read_csv('sales_train_validation.csv')\ntrain_evaluation = pd.read_csv('sales_train_evaluation.csv')\ntest_validation = pd.read_csv('sales_test_validation.csv')\ntest_evaluation = pd.read_csv('sales_test_evaluation.csv')\n\nall_data = pd.merge(\n    train_evaluation,\n    test_evaluation,\n    how=\"inner\",\n    on=None,\n    left_on=['item_id', 'dept_id', 'cat_id', 'store_id', 'state_id'],\n    right_on=['item_id', 'dept_id', 'cat_id', 'store_id', 'state_id'],\n    sort=False,\n    suffixes=(\"_x\", \"_y\"),\n    copy=True,\n    indicator=False,\n    validate=None,\n)\n\ngroups = {\n    'l1': ['item_id', 'dept_id', 'cat_id', 'store_id', 'state_id'],\n    'l2': ['state_id'],\n    'l3': ['store_id'],\n    'l4': ['cat_id'],\n    'l5': ['dept_id'],\n    'l6': ['state_id', 'cat_id'],\n    'l7': ['state_id', 'dept_id'],\n    'l8': ['store_id', 'cat_id'],\n    'l9': ['store_id', 'dept_id'],\n    'l10': ['item_id'],\n}\n\nfor k, v in groups.items():\n    if k == 'l1':\n        grouped_data = all_data.drop(columns=v).sum().to_frame(name='total')\n    else:\n        grouped_data = all_data.groupby(v).sum().transpose()\n    grouped_data['date'] = calendar.index\n    grouped_data = grouped_data.set_index('date')\n\n    if isinstance(grouped_data.columns, pd.MultiIndex):\n        grouped_data.columns = [c[0] + \"_\" + c[1] for c in grouped_data.columns]\n\n    grouped_data.to_csv(f'M5-{k}.csv', index=True)\n"
  },
  {
    "path": "ts_forecasting_methods/CoST/datautils.py",
    "content": "import numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\n\n\ndef load_forecast_npy(name, univar=False):\n    data = np.load(f'datasets/{name}.npy')    \n    if univar:\n        data = data[: -1:]\n        \n    train_slice = slice(None, int(0.6 * len(data)))\n    valid_slice = slice(int(0.6 * len(data)), int(0.8 * len(data)))\n    test_slice = slice(int(0.8 * len(data)), None)\n    \n    scaler = StandardScaler().fit(data[train_slice])\n    data = scaler.transform(data)\n    data = np.expand_dims(data, 0)\n\n    pred_lens = [24, 48, 96, 288, 672]\n    return data, train_slice, valid_slice, test_slice, scaler, pred_lens, 0\n\ndef _get_time_features(dt):\n    return np.stack([\n        dt.minute.to_numpy(),\n        dt.hour.to_numpy(),\n        dt.dayofweek.to_numpy(),\n        dt.day.to_numpy(),\n        dt.dayofyear.to_numpy(),\n        dt.month.to_numpy(),\n        dt.weekofyear.to_numpy(),\n    ], axis=1).astype(np.float)\n\ndef load_forecast_csv(name, univar=False):\n    data = pd.read_csv(f'datasets/{name}.csv', index_col='date', parse_dates=True)\n    dt_embed = _get_time_features(data.index)\n    n_covariate_cols = dt_embed.shape[-1]\n    \n    if univar:\n        if name in ('ETTh1', 'ETTh2', 'ETTm1', 'ETTm2'):\n            data = data[['OT']]\n        elif name == 'electricity':\n            data = data[['MT_001']]\n        elif name == 'WTH':\n            data = data[['WetBulbCelsius']]\n        else:\n            data = data.iloc[:, -1:]\n\n    data = data.to_numpy()\n    if name == 'ETTh1' or name == 'ETTh2':\n        train_slice = slice(None, 12 * 30 * 24)\n        valid_slice = slice(12 * 30 * 24, 16 * 30 * 24)\n        test_slice = slice(16 * 30 * 24, 20 * 30 * 24)\n    elif name == 'ETTm1' or name == 'ETTm2':\n        train_slice = slice(None, 12 * 30 * 24 * 4)\n        valid_slice = slice(12 * 30 * 24 * 4, 16 * 30 * 24 * 4)\n        test_slice = slice(16 * 30 * 24 * 4, 20 * 30 * 24 * 
4)\n    elif name.startswith('M5'):\n        train_slice = slice(None, int(0.8 * (1913 + 28)))\n        valid_slice = slice(int(0.8 * (1913 + 28)), 1913 + 28)\n        test_slice = slice(1913 + 28 - 1, 1913 + 2 * 28)\n    else:\n        train_slice = slice(None, int(0.6 * len(data)))\n        valid_slice = slice(int(0.6 * len(data)), int(0.8 * len(data)))\n        test_slice = slice(int(0.8 * len(data)), None)\n    \n    scaler = StandardScaler().fit(data[train_slice])\n    data = scaler.transform(data)\n    if name in ('electricity') or name.startswith('M5'):\n        data = np.expand_dims(data.T, -1)  # Each variable is an instance rather than a feature\n    else:\n        data = np.expand_dims(data, 0)\n\n    if n_covariate_cols > 0:\n        dt_scaler = StandardScaler().fit(dt_embed[train_slice])\n        dt_embed = np.expand_dims(dt_scaler.transform(dt_embed), 0)\n        data = np.concatenate([np.repeat(dt_embed, data.shape[0], axis=0), data], axis=-1)\n\n    if name in ('ETTh1', 'ETTh2', 'electricity', 'WTH'):\n        pred_lens = [24, 48, 168, 336, 720]\n    elif name.startswith('M5'):\n        pred_lens = [28]\n    else:\n        pred_lens = [24, 48, 96, 288, 672]\n        \n    return data, train_slice, valid_slice, test_slice, scaler, pred_lens, n_covariate_cols\n"
  },
  {
    "path": "ts_forecasting_methods/CoST/models/__init__.py",
    "content": ""
  },
  {
    "path": "ts_forecasting_methods/CoST/models/dilated_conv.py",
    "content": "import torch\nfrom torch import nn\nimport torch.nn.functional as F\nimport numpy as np\n\nclass SamePadConv(nn.Module):\n    def __init__(self, in_channels, out_channels, kernel_size, dilation=1, groups=1):\n        super().__init__()\n        self.receptive_field = (kernel_size - 1) * dilation + 1\n        padding = self.receptive_field // 2\n        self.conv = nn.Conv1d(\n            in_channels, out_channels, kernel_size,\n            padding=padding,\n            dilation=dilation,\n            groups=groups\n        )\n        self.remove = 1 if self.receptive_field % 2 == 0 else 0\n        \n    def forward(self, x):\n        out = self.conv(x)\n        if self.remove > 0:\n            out = out[:, :, : -self.remove]\n        return out\n\n\nclass ConvBlock(nn.Module):\n    def __init__(self, in_channels, out_channels, kernel_size, dilation, final=False):\n        super().__init__()\n        self.conv1 = SamePadConv(in_channels, out_channels, kernel_size, dilation=dilation)\n        self.conv2 = SamePadConv(out_channels, out_channels, kernel_size, dilation=dilation)\n        self.projector = nn.Conv1d(in_channels, out_channels, 1) if in_channels != out_channels or final else None\n    \n    def forward(self, x):\n        residual = x if self.projector is None else self.projector(x)\n        x = F.gelu(x)\n        x = self.conv1(x)\n        x = F.gelu(x)\n        x = self.conv2(x)\n        return x + residual\n\n\nclass DilatedConvEncoder(nn.Module):\n    def __init__(self, in_channels, channels, kernel_size, extract_layers=None):\n        super().__init__()\n\n        if extract_layers is not None:\n            assert len(channels) - 1 in extract_layers\n\n        self.extract_layers = extract_layers\n        self.net = nn.Sequential(*[\n            ConvBlock(\n                channels[i-1] if i > 0 else in_channels,\n                channels[i],\n                kernel_size=kernel_size,\n                dilation=2**i,\n                
final=(i == len(channels)-1)\n            )\n            for i in range(len(channels))\n        ])\n        \n    def forward(self, x):\n        if self.extract_layers is not None:\n            outputs = []\n            for idx, mod in enumerate(self.net):\n                x = mod(x)\n                if idx in self.extract_layers:\n                    outputs.append(x)\n            return outputs\n        return self.net(x)\n"
  },
  {
    "path": "ts_forecasting_methods/CoST/models/encoder.py",
    "content": "import math\nfrom typing import List\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nimport torch.fft as fft\nfrom einops import reduce, rearrange, repeat\n\nimport numpy as np\n\nfrom .dilated_conv import DilatedConvEncoder\n\n\ndef generate_continuous_mask(B, T, n=5, l=0.1):\n    res = torch.full((B, T), True, dtype=torch.bool)\n    if isinstance(n, float):\n        n = int(n * T)\n    n = max(min(n, T // 2), 1)\n    \n    if isinstance(l, float):\n        l = int(l * T)\n    l = max(l, 1)\n    \n    for i in range(B):\n        for _ in range(n):\n            t = np.random.randint(T-l+1)\n            res[i, t:t+l] = False\n    return res\n\n\ndef generate_binomial_mask(B, T, p=0.5):\n    return torch.from_numpy(np.random.binomial(1, p, size=(B, T))).to(torch.bool)\n\n\nclass BandedFourierLayer(nn.Module):\n    def __init__(self, in_channels, out_channels, band, num_bands, length=201):\n        super().__init__()\n\n        self.length = length\n        self.total_freqs = (self.length // 2) + 1\n\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n\n        self.band = band  # zero indexed\n        self.num_bands = num_bands\n\n        self.num_freqs = self.total_freqs // self.num_bands + (self.total_freqs % self.num_bands if self.band == self.num_bands - 1 else 0)\n\n        self.start = self.band * (self.total_freqs // self.num_bands)\n        self.end = self.start + self.num_freqs\n\n\n        # case: from other frequencies\n        self.weight = nn.Parameter(torch.empty((self.num_freqs, in_channels, out_channels), dtype=torch.cfloat))\n        self.bias = nn.Parameter(torch.empty((self.num_freqs, out_channels), dtype=torch.cfloat))\n        self.reset_parameters()\n\n    def forward(self, input):\n        # input - b t d\n        b, t, _ = input.shape\n        input_fft = fft.rfft(input, dim=1)\n        output_fft = torch.zeros(b, t // 2 + 1, self.out_channels, device=input.device, 
dtype=torch.cfloat)\n        output_fft[:, self.start:self.end] = self._forward(input_fft)\n        return fft.irfft(output_fft, n=input.size(1), dim=1)\n\n    def _forward(self, input):\n        output = torch.einsum('bti,tio->bto', input[:, self.start:self.end], self.weight)\n        return output + self.bias\n\n    def reset_parameters(self) -> None:\n        nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))\n        fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight)\n        bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0\n        nn.init.uniform_(self.bias, -bound, bound)\n\n\nclass CoSTEncoder(nn.Module):\n    def __init__(self, input_dims, output_dims,\n                 kernels: List[int],\n                 length: int,\n                 hidden_dims=64, depth=10,\n                 mask_mode='binomial'):\n        super().__init__()\n\n        component_dims = output_dims // 2\n\n        self.input_dims = input_dims\n        self.output_dims = output_dims\n        self.component_dims = component_dims\n        self.hidden_dims = hidden_dims\n        self.mask_mode = mask_mode\n        self.input_fc = nn.Linear(input_dims, hidden_dims)\n\n        self.feature_extractor = DilatedConvEncoder(\n            hidden_dims,\n            [hidden_dims] * depth + [output_dims],\n            kernel_size=3\n        )\n\n        self.repr_dropout = nn.Dropout(p=0.1)\n\n        self.kernels = kernels\n\n        self.tfd = nn.ModuleList(\n            [nn.Conv1d(output_dims, component_dims, k, padding=k-1) for k in kernels]\n        )\n\n        self.sfd = nn.ModuleList(\n            [BandedFourierLayer(output_dims, component_dims, b, 1, length=length) for b in range(1)]\n        )\n\n    def forward(self, x, tcn_output=False, mask='all_true'):  # x: B x T x input_dims\n        nan_mask = ~x.isnan().any(axis=-1)\n        x[~nan_mask] = 0\n        x = self.input_fc(x)  # B x T x Ch\n\n        # generate & apply mask\n        if mask is None:\n            if 
self.training:\n                mask = self.mask_mode\n            else:\n                mask = 'all_true'\n\n        if mask == 'binomial':\n            mask = generate_binomial_mask(x.size(0), x.size(1)).to(x.device)\n        elif mask == 'continuous':\n            mask = generate_continuous_mask(x.size(0), x.size(1)).to(x.device)\n        elif mask == 'all_true':\n            mask = x.new_full((x.size(0), x.size(1)), True, dtype=torch.bool)\n        elif mask == 'all_false':\n            mask = x.new_full((x.size(0), x.size(1)), False, dtype=torch.bool)\n        elif mask == 'mask_last':\n            mask = x.new_full((x.size(0), x.size(1)), True, dtype=torch.bool)\n            mask[:, -1] = False\n\n        mask &= nan_mask\n        x[~mask] = 0\n\n        # conv encoder\n        x = x.transpose(1, 2)  # B x Ch x T\n        x = self.feature_extractor(x)  # B x Co x T\n\n        if tcn_output:\n            return x.transpose(1, 2)\n\n        trend = []\n        for idx, mod in enumerate(self.tfd):\n            out = mod(x)  # b d t\n            if self.kernels[idx] != 1:\n                out = out[..., :-(self.kernels[idx] - 1)]\n            trend.append(out.transpose(1, 2))  # b t d\n        trend = reduce(\n            rearrange(trend, 'list b t d -> list b t d'),\n            'list b t d -> b t d', 'mean'\n        )\n\n        x = x.transpose(1, 2)  # B x T x Co\n\n        season = []\n        for mod in self.sfd:\n            out = mod(x)  # b t d\n            season.append(out)\n        season = season[0]\n\n        return trend, self.repr_dropout(season)\n"
  },
  {
    "path": "ts_forecasting_methods/CoST/requirements.txt",
    "content": "scipy==1.6.1\ntorch==1.9.0\nnumpy==1.19.2\npandas==1.0.1\nscikit_learn==0.24.1\neinops==0.3.0\n"
  },
  {
    "path": "ts_forecasting_methods/CoST/scripts/ETT_CoST.sh",
    "content": "for seed in $(seq 0 4 11 22 43); do\n  # multivar\n  python -u train.py ETTh1 forecast_multivar --alpha 0.0005 --kernels 1 2 4 8 16 32 64 128 --max-train-length 201 --batch-size 128 --archive forecast_csv --repr-dims 320 --max-threads 8 --seed ${seed} --eval\n  python -u train.py ETTh2 forecast_multivar --alpha 0.0005 --kernels 1 2 4 8 16 32 64 128 --max-train-length 201 --batch-size 128 --archive forecast_csv --repr-dims 320 --max-threads 8 --seed ${seed} --eval\n  python -u train.py ETTm1 forecast_multivar --alpha 0.0005 --kernels 1 2 4 8 16 32 64 128 --max-train-length 201 --batch-size 128 --archive forecast_csv --repr-dims 320 --max-threads 8 --seed ${seed} --eval\ndone\n"
  },
  {
    "path": "ts_forecasting_methods/CoST/scripts/Electricity_CoST.sh",
    "content": "for seed in $(seq 0 4 11 22 43); do\n  python -u train.py electricity forecast_multivar --alpha 0.0005 --kernels 1 2 4 8 16 32 64 128 --max-train-length 201 --batch-size 128 --archive forecast_csv --repr-dims 320 --max-threads 8 --seed ${seed} --eval\ndone"
  },
  {
    "path": "ts_forecasting_methods/CoST/scripts/M5_CoST.sh",
    "content": "for level in $(seq 1 10); do\n  for seed in $(seq 0 4 11 22 43); do\n    # multivar\n    python -u train.py M5-l${level} forecast_multivar --alpha 0.0005 --kernels 1 2 4 8 16 32 64 128 --max-train-length 201 --batch-size 128 --archive forecast_csv --repr-dims 320 --max-threads 8 --seed ${seed} --eval\n    # univar\n    python -u train.py M5-l${level} forecast_univar --alpha 0.0005 --kernels 1 2 4 8 16 32 64 128 --max-train-length 201 --batch-size 128 --archive forecast_csv_univar --repr-dims 320 --max-threads 8 --seed ${seed} --eval\n  done\ndone"
  },
  {
    "path": "ts_forecasting_methods/CoST/scripts/Weather_CoST.sh",
    "content": "for seed in $(seq 0 0 4 11 22 43); do\n  # multivar\n  python -u train.py WTH forecast_multivar --alpha 0.0005 --kernels 1 2 4 8 16 32 64 128 --max-train-length 201 --batch-size 128 --archive forecast_csv --repr-dims 320 --max-threads 8 --seed ${seed} --eval\n  # univar\n  python -u train.py WTH forecast_univar --alpha 0.0005 --kernels 1 2 4 8 16 32 64 128 --max-train-length 201 --batch-size 128 --archive forecast_csv_univar --repr-dims 320 --max-threads 8 --seed ${seed} --eval\ndone"
  },
  {
    "path": "ts_forecasting_methods/CoST/tasks/__init__.py",
    "content": "from .forecasting import eval_forecasting\n"
  },
  {
    "path": "ts_forecasting_methods/CoST/tasks/_eval_protocols.py",
    "content": "import numpy as np\nfrom sklearn.linear_model import Ridge\nfrom sklearn.model_selection import GridSearchCV, train_test_split\n\n\ndef fit_ridge(train_features, train_y, valid_features, valid_y, MAX_SAMPLES=100000):\n    # If the training set is too large, subsample MAX_SAMPLES examples\n    if train_features.shape[0] > MAX_SAMPLES:\n        split = train_test_split(\n            train_features, train_y,\n            train_size=MAX_SAMPLES, random_state=0\n        )\n        train_features = split[0]\n        train_y = split[2]\n    if valid_features.shape[0] > MAX_SAMPLES:\n        split = train_test_split(\n            valid_features, valid_y,\n            train_size=MAX_SAMPLES, random_state=0\n        )\n        valid_features = split[0]\n        valid_y = split[2]\n    alphas = [0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50, 100, 200, 500, 1000]\n    valid_results = []\n    for alpha in alphas:\n        lr = Ridge(alpha=alpha).fit(train_features, train_y)\n        valid_pred = lr.predict(valid_features)\n        score = np.sqrt(((valid_pred - valid_y) ** 2).mean()) + np.abs(valid_pred - valid_y).mean()\n        valid_results.append(score)\n    best_alpha = alphas[np.argmin(valid_results)]\n    \n    lr = Ridge(alpha=best_alpha)\n    lr.fit(train_features, train_y)\n    return lr\n"
  },
  {
    "path": "ts_forecasting_methods/CoST/tasks/forecasting.py",
    "content": "import numpy as np\nimport time\nfrom . import _eval_protocols as eval_protocols\n\n\ndef generate_pred_samples(features, data, pred_len, drop=0):\n    n = data.shape[1]\n    features = features[:, :-pred_len]\n    labels = np.stack([ data[:, i:1+n+i-pred_len] for i in range(pred_len)], axis=2)[:, 1:]\n    features = features[:, drop:]\n    labels = labels[:, drop:]\n    return features.reshape(-1, features.shape[-1]), \\\n            labels.reshape(-1, labels.shape[2]*labels.shape[3])\n\n\ndef cal_metrics(pred, target):\n    return {\n        'MSE': ((pred - target) ** 2).mean(),\n        'MAE': np.abs(pred - target).mean()\n    }\n\n\ndef eval_forecasting(model, data, train_slice, valid_slice, test_slice, scaler, pred_lens, n_covariate_cols, padding):\n    t = time.time()\n\n    all_repr = model.encode(\n        data,\n        mode='forecasting',\n        casual=True,\n        sliding_length=1,\n        sliding_padding=padding,\n        batch_size=256\n    )\n\n    train_repr = all_repr[:, train_slice]\n    valid_repr = all_repr[:, valid_slice]\n    test_repr = all_repr[:, test_slice]\n\n    train_data = data[:, train_slice, n_covariate_cols:]\n    valid_data = data[:, valid_slice, n_covariate_cols:]\n    test_data = data[:, test_slice, n_covariate_cols:]\n\n    encoder_infer_time = time.time() - t\n    \n    ours_result = {}\n    lr_train_time = {}\n    lr_infer_time = {}\n    out_log = {}\n    for pred_len in pred_lens:\n        train_features, train_labels = generate_pred_samples(train_repr, train_data, pred_len, drop=padding)\n        valid_features, valid_labels = generate_pred_samples(valid_repr, valid_data, pred_len)\n        test_features, test_labels = generate_pred_samples(test_repr, test_data, pred_len)\n\n        t = time.time()\n        lr = eval_protocols.fit_ridge(train_features, train_labels, valid_features, valid_labels)\n        lr_train_time[pred_len] = time.time() - t\n\n        t = time.time()\n        test_pred = 
lr.predict(test_features)\n        lr_infer_time[pred_len] = time.time() - t\n\n        ori_shape = test_data.shape[0], -1, pred_len, test_data.shape[2]\n        test_pred = test_pred.reshape(ori_shape)\n        test_labels = test_labels.reshape(ori_shape)\n\n        if test_data.shape[0] > 1:\n            test_pred_inv = scaler.inverse_transform(test_pred.swapaxes(0, 3)).swapaxes(0, 3)\n            test_labels_inv = scaler.inverse_transform(test_labels.swapaxes(0, 3)).swapaxes(0, 3)\n        else:\n            test_pred_inv = scaler.inverse_transform(test_pred)\n            test_labels_inv = scaler.inverse_transform(test_labels)\n        out_log[pred_len] = {\n            'norm': test_pred,\n            'raw': test_pred_inv,\n            'norm_gt': test_labels,\n            'raw_gt': test_labels_inv\n        }\n        ours_result[pred_len] = {\n            'norm': cal_metrics(test_pred, test_labels),\n            'raw': cal_metrics(test_pred_inv, test_labels_inv)\n        }\n        \n    eval_res = {\n        'ours': ours_result,\n        'encoder_infer_time': encoder_infer_time,\n        'lr_train_time': lr_train_time,\n        'lr_infer_time': lr_infer_time\n    }\n    return out_log, eval_res\n"
  },
  {
    "path": "ts_forecasting_methods/CoST/train.py",
    "content": "import argparse\nimport os\nimport time\nimport datetime\nimport math\nimport numpy as np\nimport tasks\nimport datautils\nfrom utils import init_dl_program, name_with_datetime, pkl_save, data_dropout\n\n# import methods\nfrom cost import CoST\n\n\ndef save_checkpoint_callback(\n    save_every=1,\n    unit='epoch'\n):\n    assert unit in ('epoch', 'iter')\n    def callback(model, loss):\n        n = model.n_epochs if unit == 'epoch' else model.n_iters\n        if n % save_every == 0:\n            model.save(f'{run_dir}/model_{n}.pkl')\n    return callback\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('dataset', help='The dataset name')\n    parser.add_argument('run_name', help='The folder name used to save model, output and evaluation metrics. This can be set to any word')\n    parser.add_argument('--archive', type=str, required=True, help='The archive name that the dataset belongs to. This can be set to forecast_csv, or forecast_csv_univar')\n    parser.add_argument('--gpu', type=int, default=0, help='The gpu no. 
used for training and inference (defaults to 0)')\n    parser.add_argument('--batch-size', type=int, default=8, help='The batch size (defaults to 8)')\n    parser.add_argument('--lr', type=float, default=0.001, help='The learning rate (defaults to 0.001)')\n    parser.add_argument('--repr-dims', type=int, default=320, help='The representation dimension (defaults to 320)')\n    parser.add_argument('--max-train-length', type=int, default=3000, help='For sequence with a length greater than <max_train_length>, it would be cropped into some sequences, each of which has a length less than <max_train_length> (defaults to 3000)')\n    parser.add_argument('--iters', type=int, default=None, help='The number of iterations')\n    parser.add_argument('--epochs', type=int, default=None, help='The number of epochs')\n    parser.add_argument('--save-every', type=int, default=None, help='Save the checkpoint every <save_every> iterations/epochs')\n    parser.add_argument('--seed', type=int, default=None, help='The random seed')\n    parser.add_argument('--max-threads', type=int, default=None, help='The maximum allowed number of threads used by this process')\n    parser.add_argument('--eval', action=\"store_true\", help='Whether to perform evaluation after training')\n\n    parser.add_argument('--kernels', type=int, nargs='+', default=None)\n    parser.add_argument('--alpha', type=float, default=0.0005)\n\n    args = parser.parse_args()\n\n    print(\"Dataset:\", args.dataset)\n    print(\"Arguments:\", str(args))\n    \n    device = init_dl_program(args.gpu, seed=args.seed, max_threads=args.max_threads)\n\n    if args.archive == 'forecast_csv':\n        task_type = 'forecasting'\n        data, train_slice, valid_slice, test_slice, scaler, pred_lens, n_covariate_cols = datautils.load_forecast_csv(args.dataset)\n        train_data = data[:, train_slice]\n    elif args.archive == 'forecast_csv_univar':\n        task_type = 'forecasting'\n        data, train_slice, valid_slice, 
test_slice, scaler, pred_lens, n_covariate_cols = datautils.load_forecast_csv(args.dataset, univar=True)\n        train_data = data[:, train_slice]\n    elif args.archive == 'forecast_npy':\n        task_type = 'forecasting'\n        data, train_slice, valid_slice, test_slice, scaler, pred_lens, n_covariate_cols = datautils.load_forecast_npy(args.dataset)\n        train_data = data[:, train_slice]\n    elif args.archive == 'forecast_npy_univar':\n        task_type = 'forecasting'\n        data, train_slice, valid_slice, test_slice, scaler, pred_lens, n_covariate_cols = datautils.load_forecast_npy(args.dataset, univar=True)\n        train_data = data[:, train_slice]\n    else:\n        raise ValueError(f\"Archive type {args.archive} is not supported.\")\n\n    config = dict(\n        batch_size=args.batch_size,\n        lr=args.lr,\n        output_dims=args.repr_dims,\n    )\n    \n    if args.save_every is not None:\n        unit = 'epoch' if args.epochs is not None else 'iter'\n        config[f'after_{unit}_callback'] = save_checkpoint_callback(args.save_every, unit)\n\n    run_dir = f\"training/{args.dataset}/{name_with_datetime(args.run_name)}\"\n\n    os.makedirs(run_dir, exist_ok=True)\n    \n    t = time.time()\n\n    model = CoST(\n        input_dims=train_data.shape[-1],\n        kernels=args.kernels,\n        alpha=args.alpha,\n        max_train_length=args.max_train_length,\n        device=device,\n        **config\n    )\n\n    loss_log = model.fit(\n        train_data,\n        n_epochs=args.epochs,\n        n_iters=args.iters,\n        verbose=True\n    )\n    model.save(f'{run_dir}/model.pkl')\n\n    t = time.time() - t\n    print(f\"\\nTraining time: {datetime.timedelta(seconds=t)}\\n\")\n\n    if args.eval:\n        out, eval_res = tasks.eval_forecasting(model, data, train_slice, valid_slice, test_slice, scaler, pred_lens, n_covariate_cols, args.max_train_length-1)\n        print('Evaluation result:', eval_res)\n        
pkl_save(f'{run_dir}/eval_res.pkl', eval_res)\n        pkl_save(f'{run_dir}/out.pkl', out)\n\n    print(\"Finished.\")\n"
  },
  {
    "path": "ts_forecasting_methods/CoST/utils.py",
    "content": "import os\nimport numpy as np\nimport pickle\nimport torch\nimport random\nfrom datetime import datetime\nimport torch.nn as nn\n\n\ndef pkl_save(name, var):\n    with open(name, 'wb') as f:\n        pickle.dump(var, f)\n\ndef pkl_load(name):\n    with open(name, 'rb') as f:\n        return pickle.load(f)\n    \ndef torch_pad_nan(arr, left=0, right=0, dim=0):\n    if left > 0:\n        padshape = list(arr.shape)\n        padshape[dim] = left\n        arr = torch.cat((torch.full(padshape, np.nan), arr), dim=dim)\n    if right > 0:\n        padshape = list(arr.shape)\n        padshape[dim] = right\n        arr = torch.cat((arr, torch.full(padshape, np.nan)), dim=dim)\n    return arr\n    \ndef pad_nan_to_target(array, target_length, axis=0, both_side=False):\n    assert array.dtype in [np.float16, np.float32, np.float64]\n    pad_size = target_length - array.shape[axis]\n    if pad_size <= 0:\n        return array\n    npad = [(0, 0)] * array.ndim\n    if both_side:\n        npad[axis] = (pad_size // 2, pad_size - pad_size//2)\n    else:\n        npad[axis] = (0, pad_size)\n    return np.pad(array, pad_width=npad, mode='constant', constant_values=np.nan)\n\ndef split_with_nan(x, sections, axis=0):\n    assert x.dtype in [np.float16, np.float32, np.float64]\n    arrs = np.array_split(x, sections, axis=axis)\n    target_length = arrs[0].shape[axis]\n    for i in range(len(arrs)):\n        arrs[i] = pad_nan_to_target(arrs[i], target_length, axis=axis)\n    return arrs\n\ndef take_per_row(A, indx, num_elem):\n    all_indx = indx[:,None] + np.arange(num_elem)\n    return A[torch.arange(all_indx.shape[0])[:,None], all_indx]\n\ndef centerize_vary_length_series(x):\n    prefix_zeros = np.argmax(~np.isnan(x).all(axis=-1), axis=1)\n    suffix_zeros = np.argmax(~np.isnan(x[:, ::-1]).all(axis=-1), axis=1)\n    offset = (prefix_zeros + suffix_zeros) // 2 - prefix_zeros\n    rows, column_indices = np.ogrid[:x.shape[0], :x.shape[1]]\n    offset[offset < 0] += 
x.shape[1]\n    column_indices = column_indices - offset[:, np.newaxis]\n    return x[rows, column_indices]\n\ndef data_dropout(arr, p):\n    B, T = arr.shape[0], arr.shape[1]\n    mask = np.full(B*T, False, dtype=np.bool)\n    ele_sel = np.random.choice(\n        B*T,\n        size=int(B*T*p),\n        replace=False\n    )\n    mask[ele_sel] = True\n    res = arr.copy()\n    res[mask.reshape(B, T)] = np.nan\n    return res\n\ndef name_with_datetime(prefix='default'):\n    now = datetime.now()\n    return prefix + '_' + now.strftime(\"%Y%m%d_%H%M%S\")\n\ndef init_dl_program(\n    device_name,\n    seed=None,\n    use_cudnn=True,\n    deterministic=False,\n    benchmark=False,\n    use_tf32=False,\n    max_threads=None\n):\n    import torch\n    if max_threads is not None:\n        torch.set_num_threads(max_threads)  # intraop\n        if torch.get_num_interop_threads() != max_threads:\n            torch.set_num_interop_threads(max_threads)  # interop\n        try:\n            import mkl\n        except:\n            pass\n        else:\n            mkl.set_num_threads(max_threads)\n        \n    if seed is not None:\n        random.seed(seed)\n        seed += 1\n        np.random.seed(seed)\n        seed += 1\n        torch.manual_seed(seed)\n        \n    if isinstance(device_name, (str, int)):\n        device_name = [device_name]\n    \n    devices = []\n    for t in reversed(device_name):\n        t_device = torch.device(t)\n        devices.append(t_device)\n        if t_device.type == 'cuda':\n            assert torch.cuda.is_available()\n            torch.cuda.set_device(t_device)\n            if seed is not None:\n                seed += 1\n                torch.cuda.manual_seed(seed)\n    devices.reverse()\n    torch.backends.cudnn.enabled = use_cudnn\n    torch.backends.cudnn.deterministic = deterministic\n    torch.backends.cudnn.benchmark = benchmark\n    \n    if hasattr(torch.backends.cudnn, 'allow_tf32'):\n        torch.backends.cudnn.allow_tf32 = 
use_tf32\n        torch.backends.cuda.matmul.allow_tf32 = use_tf32\n        \n    return devices if len(devices) > 1 else devices[0]\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/README.md",
    "content": "## README_Forecasting\n\n### Usage\n\n\nTo train and evaluate a model on a dataset, set the dataset_name `dataset='ETTh1' or 'Traffic'`, and then run the following command:\n\n   ```python\n   python train_logtrans.py\n   python train_tcn.py\n   python train_informer.py\n   python train_autoformer.py\n   python train_ts2vec.py\n   python train_cost.py\n   python train_timesnet.py\n   python train_patchtst.py\n   python train_dlinear.py\n   python train_gpt4ts.py\n   python train_tempo.py\n   python train_itransformer.py\n   ```"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/__init__.py",
    "content": ""
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/data_config.yml",
    "content": "datasets:\n  ETTh1:\n    root_path: ./dataset/ETT-small/\n    data_path: ETTh1.csv\n    data_name: etth1\n    data: ett_h\n    lradj: type4\n    features: M\n    target: OT\n    embed: timeF\n    freq: 0\n    percent: 100\n  ETTh2:\n    root_path: ./dataset/ETT-small/\n    data_path: ETTh2.csv\n    data_name: etth2\n    data: ett_h\n    lradj: type4\n    features: M\n    target: OT\n    embed: timeF\n    freq: 0\n    percent: 100\n  ETTm1:\n    root_path: ./dataset/ETT-small/\n    data_path: ETTm1.csv\n    data_name: ettm1\n    data: ett_m\n    lradj: type4\n    features: M\n    target: OT\n    embed: timeF\n    freq: 0\n    percent: 100\n  ETTm2:\n    root_path: ./dataset/ETT-small/\n    data_path: ETTm2.csv\n    data_name: ettm2\n    data: ett_m\n    lradj: type4\n    features: M\n    target: OT\n    embed: timeF\n    freq: 0\n    percent: 100\n  traffic:\n    root_path: ./dataset/traffic/\n    data_path: traffic.csv\n    data_name: traffic\n    data: custom\n    lradj: type4\n    features: M\n    target: OT\n    embed: timeF\n    freq: 0\n    percent: 100\n  weather:\n    root_path: ./dataset/weather/\n    data_path: weather.csv\n    data_name: weather\n    data: custom\n    lradj: type4\n    features: M\n    target: OT\n    embed: timeF\n    freq: 0\n    percent: 100\n  electricity:\n    root_path: ./dataset/electricity/\n    data_path: electricity.csv\n    data_name: electricity\n    data: custom\n    lradj: type4\n    features: M\n    target: OT\n    embed: timeF\n    freq: 0\n    percent: 100\n  ILI:\n    root_path: ./dataset/illness/\n    data_path: national_illness.csv\n    data_name: ILI\n    data: custom\n    lradj: type4\n    features: M\n    target: OT\n    embed: timeF\n    freq: 0\n    percent: 100\n  exchange:\n    root_path: ./dataset/exchange_rate/\n    data_path: exchange_rate.csv\n    data_name: exchange\n    data: custom\n    lradj: type4\n    features: M\n    target: OT\n    embed: timeF\n    freq: 0\n    percent: 100\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/data_provider/__init__.py",
    "content": "\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/data_provider/data_factory.py",
    "content": "from Other_baselines.data_provider.data_loader import Dataset_ETT_hour, Dataset_ETT_minute, Dataset_Custom, Dataset_M4, PSMSegLoader, \\\n    MSLSegLoader, SMAPSegLoader, SMDSegLoader, SWATSegLoader, UEAloader\nfrom Other_baselines.data_provider.uea import collate_fn\nfrom torch.utils.data import DataLoader\n\ndata_dict = {\n    'ETTh1': Dataset_ETT_hour,\n    'ETTh2': Dataset_ETT_hour,\n    'ETTm1': Dataset_ETT_minute,\n    'ETTm2': Dataset_ETT_minute,\n    'custom': Dataset_Custom,\n    'm4': Dataset_M4,\n    'PSM': PSMSegLoader,\n    'MSL': MSLSegLoader,\n    'SMAP': SMAPSegLoader,\n    'SMD': SMDSegLoader,\n    'SWAT': SWATSegLoader,\n    'UEA': UEAloader\n}\n\n\ndef data_provider(args, flag):\n    Data = data_dict[args.data]\n    timeenc = 0 if args.embed != 'timeF' else 1\n\n    shuffle_flag = False if flag == 'test' else True\n    drop_last = False\n    batch_size = args.batch_size\n    freq = args.freq\n\n    if args.task_name == 'anomaly_detection':\n        drop_last = False\n        data_set = Data(\n            args = args,\n            root_path=args.root_path,\n            win_size=args.seq_len,\n            flag=flag,\n        )\n        print(flag, len(data_set))\n        data_loader = DataLoader(\n            data_set,\n            batch_size=batch_size,\n            shuffle=shuffle_flag,\n            num_workers=args.num_workers,\n            drop_last=drop_last)\n        return data_set, data_loader\n    elif args.task_name == 'classification':\n        drop_last = False\n        data_set = Data(\n            args = args,\n            root_path=args.root_path,\n            flag=flag,\n        )\n\n        data_loader = DataLoader(\n            data_set,\n            batch_size=batch_size,\n            shuffle=shuffle_flag,\n            num_workers=args.num_workers,\n            drop_last=drop_last,\n            collate_fn=lambda x: collate_fn(x, max_len=args.seq_len)\n        )\n        return data_set, data_loader\n    else:\n    
    if args.data == 'm4':\n            drop_last = False\n        data_set = Data(\n            args = args,\n            root_path=args.root_path,\n            data_path=args.data_path,\n            flag=flag,\n            size=[args.seq_len, args.label_len, args.pred_len],\n            features=args.features,\n            target=args.target,\n            timeenc=timeenc,\n            freq=freq,\n            seasonal_patterns=args.seasonal_patterns\n        )\n        print(\"flag = \", flag)\n        print(\"data_set = \", data_set)\n        # print(\"data_set.flag = \", data_set.flag, len(data_set.timeseries))\n        print(flag, len(data_set))\n        data_loader = DataLoader(\n            data_set,\n            batch_size=batch_size,\n            shuffle=shuffle_flag,\n            num_workers=args.num_workers,\n            drop_last=drop_last)\n        return data_set, data_loader\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/data_provider/data_factory_tempo.py",
    "content": "from Other_baselines.data_provider.data_loader_tempo import Dataset_Custom, Dataset_Pred, Dataset_TSF, Dataset_ETT_hour, Dataset_ETT_minute\nfrom torch.utils.data import DataLoader\n\ndata_dict = {\n    'custom': Dataset_Custom,\n    'tsf_data': Dataset_TSF,\n    'ett_h': Dataset_ETT_hour,\n    'ett_m': Dataset_ETT_minute,\n}\n\n\ndef data_provider(args, flag, drop_last_test=True, train_all=False):\n    Data = data_dict[args.data]\n    timeenc = 0 if args.embed != 'timeF' else 1\n    percent = args.percent\n    max_len = args.max_len\n\n    if flag == 'test':\n        shuffle_flag = False\n        drop_last = drop_last_test\n        batch_size = args.batch_size\n        freq = args.freq\n    elif flag == 'pred':\n        shuffle_flag = False\n        drop_last = False\n        batch_size = args.batch_size\n        freq = args.freq\n        Data = Dataset_Pred\n    elif flag == 'val':\n        shuffle_flag = True\n        drop_last = drop_last_test\n        batch_size = args.batch_size\n        freq = args.freq\n    else:\n        shuffle_flag = True\n        drop_last = True\n        batch_size = args.batch_size\n        freq = args.freq\n\n    data_set = Data(\n        root_path=args.root_path,\n        data_path=args.data_path,\n        flag=flag,\n        size=[args.seq_len, args.label_len, args.pred_len],\n        features=args.features,\n        target=args.target,\n        timeenc=timeenc,\n        freq=freq,\n        percent=percent,\n        max_len=max_len,\n        train_all=train_all,\n        data_name = args.data_name\n    )\n    print(flag, len(data_set))\n    data_loader = DataLoader(\n        data_set,\n        batch_size=batch_size,\n        shuffle=shuffle_flag,\n        num_workers=args.num_workers,\n        drop_last=drop_last)\n    return data_set, data_loader\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/data_provider/data_loader.py",
    "content": "import os\nimport numpy as np\nimport pandas as pd\nimport glob\nimport re\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nfrom sklearn.preprocessing import StandardScaler\nfrom Other_baselines.utils.timefeatures import time_features\nfrom Other_baselines.data_provider.m4 import M4Dataset, M4Meta\nfrom Other_baselines.data_provider.uea import subsample, interpolate_missing, Normalizer\nfrom sktime.datasets import load_from_tsfile_to_dataframe\nimport warnings\nfrom Other_baselines.utils.augmentation import run_augmentation_single\n\nwarnings.filterwarnings('ignore')\n\n\nclass Dataset_ETT_hour(Dataset):\n    def __init__(self, args, root_path, flag='train', size=None,\n                 features='S', data_path='ETTh1.csv',\n                 target='OT', scale=True, timeenc=0, freq='h', seasonal_patterns=None):\n        # size [seq_len, label_len, pred_len]\n        self.args = args\n        # info\n        if size == None:\n            self.seq_len = 24 * 4 * 4\n            self.label_len = 24 * 4\n            self.pred_len = 24 * 4\n        else:\n            self.seq_len = size[0]\n            self.label_len = size[1]\n            self.pred_len = size[2]\n        # init\n        assert flag in ['train', 'test', 'val']\n        type_map = {'train': 0, 'val': 1, 'test': 2}\n        self.set_type = type_map[flag]\n\n        self.features = features\n        self.target = target\n        self.scale = scale\n        self.timeenc = timeenc\n        self.freq = freq\n\n        self.root_path = root_path\n        self.data_path = data_path\n        self.__read_data__()\n\n    def __read_data__(self):\n        self.scaler = StandardScaler()\n        df_raw = pd.read_csv(os.path.join(self.root_path,\n                                          self.data_path))\n\n        # border1s = [0, 12 * 30 * 24 - self.seq_len, 12 * 30 * 24 + 4 * 30 * 24 - self.seq_len]\n        border1s = [0, 12 * 30 * 24, 12 * 30 * 24 + 4 * 30 * 24]\n        border2s = 
[12 * 30 * 24, 12 * 30 * 24 + 4 * 30 * 24, 12 * 30 * 24 + 8 * 30 * 24]\n        border1 = border1s[self.set_type]\n        border2 = border2s[self.set_type]\n\n        if self.features == 'M' or self.features == 'MS':\n            cols_data = df_raw.columns[1:]\n            df_data = df_raw[cols_data]\n        elif self.features == 'S':\n            df_data = df_raw[[self.target]]\n\n        if self.scale:\n            train_data = df_data[border1s[0]:border2s[0]]\n            self.scaler.fit(train_data.values)\n            data = self.scaler.transform(df_data.values)\n        else:\n            data = df_data.values\n\n        df_stamp = df_raw[['date']][border1:border2]\n        df_stamp['date'] = pd.to_datetime(df_stamp.date)\n        if self.timeenc == 0:\n            df_stamp['month'] = df_stamp.date.apply(lambda row: row.month, 1)\n            df_stamp['day'] = df_stamp.date.apply(lambda row: row.day, 1)\n            df_stamp['weekday'] = df_stamp.date.apply(lambda row: row.weekday(), 1)\n            df_stamp['hour'] = df_stamp.date.apply(lambda row: row.hour, 1)\n            data_stamp = df_stamp.drop(['date'], 1).values\n        elif self.timeenc == 1:\n            data_stamp = time_features(pd.to_datetime(df_stamp['date'].values), freq=self.freq)\n            data_stamp = data_stamp.transpose(1, 0)\n\n        print(\"self.set_type = \", self.set_type)\n        print(\"border1 = \", border1, \", border2 = \", border2)\n        print(\"data.shape = \", data.shape)\n\n        self.data_x = data[border1:border2]\n        self.data_y = data[border1:border2]\n\n        if self.set_type == 0 and self.args.augmentation_ratio > 0:\n            self.data_x, self.data_y, augmentation_tags = run_augmentation_single(self.data_x, self.data_y, self.args)\n            \n        self.data_stamp = data_stamp\n\n    def __getitem__(self, index):\n        s_begin = index\n        s_end = s_begin + self.seq_len\n        r_begin = s_end - self.label_len\n        r_end = r_begin + 
self.label_len + self.pred_len\n\n        seq_x = self.data_x[s_begin:s_end]\n        seq_y = self.data_y[r_begin:r_end]\n        seq_x_mark = self.data_stamp[s_begin:s_end]\n        seq_y_mark = self.data_stamp[r_begin:r_end]\n\n        return seq_x, seq_y, seq_x_mark, seq_y_mark\n\n    def __len__(self):\n        return len(self.data_x) - self.seq_len - self.pred_len + 1\n\n    def inverse_transform(self, data):\n        return self.scaler.inverse_transform(data)\n\n\nclass Dataset_ETT_minute(Dataset):\n    def __init__(self, args, root_path, flag='train', size=None,\n                 features='S', data_path='ETTm1.csv',\n                 target='OT', scale=True, timeenc=0, freq='t', seasonal_patterns=None):\n        # size [seq_len, label_len, pred_len]\n        self.args = args\n        # info\n        if size == None:\n            self.seq_len = 24 * 4 * 4\n            self.label_len = 24 * 4\n            self.pred_len = 24 * 4\n        else:\n            self.seq_len = size[0]\n            self.label_len = size[1]\n            self.pred_len = size[2]\n        # init\n        assert flag in ['train', 'test', 'val']\n        type_map = {'train': 0, 'val': 1, 'test': 2}\n        self.set_type = type_map[flag]\n\n        self.features = features\n        self.target = target\n        self.scale = scale\n        self.timeenc = timeenc\n        self.freq = freq\n\n        self.root_path = root_path\n        self.data_path = data_path\n        self.__read_data__()\n\n    def __read_data__(self):\n        self.scaler = StandardScaler()\n        df_raw = pd.read_csv(os.path.join(self.root_path,\n                                          self.data_path))\n\n        # border1s = [0, 12 * 30 * 24 * 4 - self.seq_len, 12 * 30 * 24 * 4 + 4 * 30 * 24 * 4 - self.seq_len]\n        border1s = [0, 12 * 30 * 24 * 4 , 12 * 30 * 24 * 4 + 4 * 30 * 24 * 4]\n        border2s = [12 * 30 * 24 * 4, 12 * 30 * 24 * 4 + 4 * 30 * 24 * 4, 12 * 30 * 24 * 4 + 8 * 30 * 24 * 4]\n        border1 = 
border1s[self.set_type]\n        border2 = border2s[self.set_type]\n\n        if self.features == 'M' or self.features == 'MS':\n            cols_data = df_raw.columns[1:]\n            df_data = df_raw[cols_data]\n        elif self.features == 'S':\n            df_data = df_raw[[self.target]]\n\n        if self.scale:\n            train_data = df_data[border1s[0]:border2s[0]]\n            self.scaler.fit(train_data.values)\n            data = self.scaler.transform(df_data.values)\n        else:\n            data = df_data.values\n\n        df_stamp = df_raw[['date']][border1:border2]\n        df_stamp['date'] = pd.to_datetime(df_stamp.date)\n        if self.timeenc == 0:\n            df_stamp['month'] = df_stamp.date.apply(lambda row: row.month, 1)\n            df_stamp['day'] = df_stamp.date.apply(lambda row: row.day, 1)\n            df_stamp['weekday'] = df_stamp.date.apply(lambda row: row.weekday(), 1)\n            df_stamp['hour'] = df_stamp.date.apply(lambda row: row.hour, 1)\n            df_stamp['minute'] = df_stamp.date.apply(lambda row: row.minute, 1)\n            df_stamp['minute'] = df_stamp.minute.map(lambda x: x // 15)\n            data_stamp = df_stamp.drop(['date'], 1).values\n        elif self.timeenc == 1:\n            data_stamp = time_features(pd.to_datetime(df_stamp['date'].values), freq=self.freq)\n            data_stamp = data_stamp.transpose(1, 0)\n\n        self.data_x = data[border1:border2]\n        self.data_y = data[border1:border2]\n\n        if self.set_type == 0 and self.args.augmentation_ratio > 0:\n            self.data_x, self.data_y, augmentation_tags = run_augmentation_single(self.data_x, self.data_y, self.args)\n\n        self.data_stamp = data_stamp\n\n    def __getitem__(self, index):\n        s_begin = index\n        s_end = s_begin + self.seq_len\n        r_begin = s_end - self.label_len\n        r_end = r_begin + self.label_len + self.pred_len\n\n        seq_x = self.data_x[s_begin:s_end]\n        seq_y = 
self.data_y[r_begin:r_end]\n        seq_x_mark = self.data_stamp[s_begin:s_end]\n        seq_y_mark = self.data_stamp[r_begin:r_end]\n\n        return seq_x, seq_y, seq_x_mark, seq_y_mark\n\n    def __len__(self):\n        return len(self.data_x) - self.seq_len - self.pred_len + 1\n\n    def inverse_transform(self, data):\n        return self.scaler.inverse_transform(data)\n\n\nclass Dataset_Custom(Dataset):\n    def __init__(self, args, root_path, flag='train', size=None,\n                 features='S', data_path='ETTh1.csv',\n                 target='OT', scale=True, timeenc=0, freq='h', seasonal_patterns=None):\n        # size [seq_len, label_len, pred_len]\n        self.args = args\n        # info\n        if size == None:\n            self.seq_len = 24 * 4 * 4\n            self.label_len = 24 * 4\n            self.pred_len = 24 * 4\n        else:\n            self.seq_len = size[0]\n            self.label_len = size[1]\n            self.pred_len = size[2]\n        # init\n        assert flag in ['train', 'test', 'val']\n        type_map = {'train': 0, 'val': 1, 'test': 2}\n        self.set_type = type_map[flag]\n\n        self.features = features\n        self.target = target\n        self.scale = scale\n        self.timeenc = timeenc\n        self.freq = freq\n\n        self.root_path = root_path\n        self.data_path = data_path\n        self.__read_data__()\n\n    def __read_data__(self):\n        self.scaler = StandardScaler()\n        df_raw = pd.read_csv(os.path.join(self.root_path,\n                                          self.data_path))\n\n        '''\n        df_raw.columns: ['date', ...(other features), target feature]\n        '''\n        cols = list(df_raw.columns)\n        cols.remove(self.target)\n        cols.remove('date')\n        df_raw = df_raw[['date'] + cols + [self.target]]\n\n        if self.data_path == 'electricity.csv':\n            num_train = int(len(df_raw) * 0.6)\n            num_test = int(len(df_raw) * 0.2) + 1\n           
 num_vali = len(df_raw) - num_train - num_test\n            border1s = [0, num_train, len(df_raw) - num_test]\n\n            border2s = [num_train, num_train + num_vali, len(df_raw)]\n            border1 = border1s[self.set_type]\n            border2 = border2s[self.set_type]\n\n        else:\n\n            num_train = int(len(df_raw) * 0.7)\n            num_test = int(len(df_raw) * 0.2)\n            num_vali = len(df_raw) - num_train - num_test\n            border1s = [0, num_train - self.seq_len, len(df_raw) - num_test - self.seq_len]\n\n            border2s = [num_train, num_train + num_vali, len(df_raw)]\n            border1 = border1s[self.set_type]\n            border2 = border2s[self.set_type]\n\n        if self.features == 'M' or self.features == 'MS':\n            cols_data = df_raw.columns[1:]\n            df_data = df_raw[cols_data]\n        elif self.features == 'S':\n            df_data = df_raw[[self.target]]\n\n        if self.scale:\n            train_data = df_data[border1s[0]:border2s[0]]\n            self.scaler.fit(train_data.values)\n            data = self.scaler.transform(df_data.values)\n        else:\n            data = df_data.values\n\n        df_stamp = df_raw[['date']][border1:border2]\n        df_stamp['date'] = pd.to_datetime(df_stamp.date)\n        if self.timeenc == 0:\n            df_stamp['month'] = df_stamp.date.apply(lambda row: row.month, 1)\n            df_stamp['day'] = df_stamp.date.apply(lambda row: row.day, 1)\n            df_stamp['weekday'] = df_stamp.date.apply(lambda row: row.weekday(), 1)\n            df_stamp['hour'] = df_stamp.date.apply(lambda row: row.hour, 1)\n            data_stamp = df_stamp.drop(['date'], 1).values\n        elif self.timeenc == 1:\n            data_stamp = time_features(pd.to_datetime(df_stamp['date'].values), freq=self.freq)\n            data_stamp = data_stamp.transpose(1, 0)\n\n        self.data_x = data[border1:border2]\n        self.data_y = data[border1:border2]\n\n        if 
self.set_type == 0 and self.args.augmentation_ratio > 0:\n            self.data_x, self.data_y, augmentation_tags = run_augmentation_single(self.data_x, self.data_y, self.args)\n\n        self.data_stamp = data_stamp\n\n    def __getitem__(self, index):\n        s_begin = index\n        s_end = s_begin + self.seq_len\n        r_begin = s_end - self.label_len\n        r_end = r_begin + self.label_len + self.pred_len\n\n        seq_x = self.data_x[s_begin:s_end]\n        seq_y = self.data_y[r_begin:r_end]\n        seq_x_mark = self.data_stamp[s_begin:s_end]\n        seq_y_mark = self.data_stamp[r_begin:r_end]\n\n        return seq_x, seq_y, seq_x_mark, seq_y_mark\n\n    def __len__(self):\n        return len(self.data_x) - self.seq_len - self.pred_len + 1\n\n    def inverse_transform(self, data):\n        return self.scaler.inverse_transform(data)\n\n\nclass Dataset_M4(Dataset):\n    def __init__(self, args, root_path, flag='pred', size=None,\n                 features='S', data_path='ETTh1.csv',\n                 target='OT', scale=False, inverse=False, timeenc=0, freq='15min',\n                 seasonal_patterns='Yearly'):\n        # size [seq_len, label_len, pred_len]\n        # init\n        self.features = features\n        self.target = target\n        self.scale = scale\n        self.inverse = inverse\n        self.timeenc = timeenc\n        self.root_path = root_path\n\n        self.seq_len = size[0]\n        self.label_len = size[1]\n        self.pred_len = size[2]\n\n        self.seasonal_patterns = seasonal_patterns\n        self.history_size = M4Meta.history_size[seasonal_patterns]\n        self.window_sampling_limit = int(self.history_size * self.pred_len)\n        self.flag = flag\n\n        self.__read_data__()\n\n    def __read_data__(self):\n        # M4Dataset.initialize()\n        if self.flag == 'train':\n            dataset = M4Dataset.load(training=True, dataset_file=self.root_path)\n        else:\n            dataset = 
M4Dataset.load(training=False, dataset_file=self.root_path)\n        training_values = np.array(\n            [v[~np.isnan(v)] for v in\n             dataset.values[dataset.groups == self.seasonal_patterns]])  # split different frequencies\n        self.ids = np.array([i for i in dataset.ids[dataset.groups == self.seasonal_patterns]])\n        self.timeseries = [ts for ts in training_values]\n\n    def __getitem__(self, index):\n        insample = np.zeros((self.seq_len, 1))\n        insample_mask = np.zeros((self.seq_len, 1))\n        outsample = np.zeros((self.pred_len + self.label_len, 1))\n        outsample_mask = np.zeros((self.pred_len + self.label_len, 1))  # m4 dataset\n\n        sampled_timeseries = self.timeseries[index]\n        cut_point = np.random.randint(low=max(1, len(sampled_timeseries) - self.window_sampling_limit),\n                                      high=len(sampled_timeseries),\n                                      size=1)[0]\n\n        insample_window = sampled_timeseries[max(0, cut_point - self.seq_len):cut_point]\n        insample[-len(insample_window):, 0] = insample_window\n        insample_mask[-len(insample_window):, 0] = 1.0\n        outsample_window = sampled_timeseries[\n                           cut_point - self.label_len:min(len(sampled_timeseries), cut_point + self.pred_len)]\n        outsample[:len(outsample_window), 0] = outsample_window\n        outsample_mask[:len(outsample_window), 0] = 1.0\n        return insample, outsample, insample_mask, outsample_mask\n\n    def __len__(self):\n        return len(self.timeseries)\n\n    def inverse_transform(self, data):\n        return self.scaler.inverse_transform(data)\n\n    def last_insample_window(self):\n        \"\"\"\n        The last window of insample size of all timeseries.\n        This function does not support batching and does not reshuffle timeseries.\n\n        :return: Last insample window of all timeseries. 
Shape \"timeseries, insample size\"\n        \"\"\"\n        insample = np.zeros((len(self.timeseries), self.seq_len))\n        insample_mask = np.zeros((len(self.timeseries), self.seq_len))\n        for i, ts in enumerate(self.timeseries):\n            ts_last_window = ts[-self.seq_len:]\n            insample[i, -len(ts):] = ts_last_window\n            insample_mask[i, -len(ts):] = 1.0\n        return insample, insample_mask\n\n\nclass PSMSegLoader(Dataset):\n    def __init__(self, args, root_path, win_size, step=1, flag=\"train\"):\n        self.flag = flag\n        self.step = step\n        self.win_size = win_size\n        self.scaler = StandardScaler()\n        data = pd.read_csv(os.path.join(root_path, 'train.csv'))\n        data = data.values[:, 1:]\n        data = np.nan_to_num(data)\n        self.scaler.fit(data)\n        data = self.scaler.transform(data)\n        test_data = pd.read_csv(os.path.join(root_path, 'test.csv'))\n        test_data = test_data.values[:, 1:]\n        test_data = np.nan_to_num(test_data)\n        self.test = self.scaler.transform(test_data)\n        self.train = data\n        data_len = len(self.train)\n        self.val = self.train[(int)(data_len * 0.8):]\n        self.test_labels = pd.read_csv(os.path.join(root_path, 'test_label.csv')).values[:, 1:]\n        print(\"test:\", self.test.shape)\n        print(\"train:\", self.train.shape)\n\n    def __len__(self):\n        if self.flag == \"train\":\n            return (self.train.shape[0] - self.win_size) // self.step + 1\n        elif (self.flag == 'val'):\n            return (self.val.shape[0] - self.win_size) // self.step + 1\n        elif (self.flag == 'test'):\n            return (self.test.shape[0] - self.win_size) // self.step + 1\n        else:\n            return (self.test.shape[0] - self.win_size) // self.win_size + 1\n\n    def __getitem__(self, index):\n        index = index * self.step\n        if self.flag == \"train\":\n            return 
np.float32(self.train[index:index + self.win_size]), np.float32(self.test_labels[0:self.win_size])\n        elif (self.flag == 'val'):\n            return np.float32(self.val[index:index + self.win_size]), np.float32(self.test_labels[0:self.win_size])\n        elif (self.flag == 'test'):\n            return np.float32(self.test[index:index + self.win_size]), np.float32(\n                self.test_labels[index:index + self.win_size])\n        else:\n            return np.float32(self.test[\n                              index // self.step * self.win_size:index // self.step * self.win_size + self.win_size]), np.float32(\n                self.test_labels[index // self.step * self.win_size:index // self.step * self.win_size + self.win_size])\n\n\nclass MSLSegLoader(Dataset):\n    def __init__(self, args, root_path, win_size, step=1, flag=\"train\"):\n        self.flag = flag\n        self.step = step\n        self.win_size = win_size\n        self.scaler = StandardScaler()\n        data = np.load(os.path.join(root_path, \"MSL_train.npy\"))\n        self.scaler.fit(data)\n        data = self.scaler.transform(data)\n        test_data = np.load(os.path.join(root_path, \"MSL_test.npy\"))\n        self.test = self.scaler.transform(test_data)\n        self.train = data\n        data_len = len(self.train)\n        self.val = self.train[(int)(data_len * 0.8):]\n        self.test_labels = np.load(os.path.join(root_path, \"MSL_test_label.npy\"))\n        print(\"test:\", self.test.shape)\n        print(\"train:\", self.train.shape)\n\n    def __len__(self):\n        if self.flag == \"train\":\n            return (self.train.shape[0] - self.win_size) // self.step + 1\n        elif (self.flag == 'val'):\n            return (self.val.shape[0] - self.win_size) // self.step + 1\n        elif (self.flag == 'test'):\n            return (self.test.shape[0] - self.win_size) // self.step + 1\n        else:\n            return (self.test.shape[0] - self.win_size) // self.win_size + 1\n\n   
 def __getitem__(self, index):\n        index = index * self.step\n        if self.flag == \"train\":\n            return np.float32(self.train[index:index + self.win_size]), np.float32(self.test_labels[0:self.win_size])\n        elif (self.flag == 'val'):\n            return np.float32(self.val[index:index + self.win_size]), np.float32(self.test_labels[0:self.win_size])\n        elif (self.flag == 'test'):\n            return np.float32(self.test[index:index + self.win_size]), np.float32(\n                self.test_labels[index:index + self.win_size])\n        else:\n            return np.float32(self.test[\n                              index // self.step * self.win_size:index // self.step * self.win_size + self.win_size]), np.float32(\n                self.test_labels[index // self.step * self.win_size:index // self.step * self.win_size + self.win_size])\n\n\nclass SMAPSegLoader(Dataset):\n    def __init__(self, args, root_path, win_size, step=1, flag=\"train\"):\n        self.flag = flag\n        self.step = step\n        self.win_size = win_size\n        self.scaler = StandardScaler()\n        data = np.load(os.path.join(root_path, \"SMAP_train.npy\"))\n        self.scaler.fit(data)\n        data = self.scaler.transform(data)\n        test_data = np.load(os.path.join(root_path, \"SMAP_test.npy\"))\n        self.test = self.scaler.transform(test_data)\n        self.train = data\n        data_len = len(self.train)\n        self.val = self.train[(int)(data_len * 0.8):]\n        self.test_labels = np.load(os.path.join(root_path, \"SMAP_test_label.npy\"))\n        print(\"test:\", self.test.shape)\n        print(\"train:\", self.train.shape)\n\n    def __len__(self):\n\n        if self.flag == \"train\":\n            return (self.train.shape[0] - self.win_size) // self.step + 1\n        elif (self.flag == 'val'):\n            return (self.val.shape[0] - self.win_size) // self.step + 1\n        elif (self.flag == 'test'):\n            return (self.test.shape[0] - 
self.win_size) // self.step + 1\n        else:\n            return (self.test.shape[0] - self.win_size) // self.win_size + 1\n\n    def __getitem__(self, index):\n        index = index * self.step\n        if self.flag == \"train\":\n            return np.float32(self.train[index:index + self.win_size]), np.float32(self.test_labels[0:self.win_size])\n        elif (self.flag == 'val'):\n            return np.float32(self.val[index:index + self.win_size]), np.float32(self.test_labels[0:self.win_size])\n        elif (self.flag == 'test'):\n            return np.float32(self.test[index:index + self.win_size]), np.float32(\n                self.test_labels[index:index + self.win_size])\n        else:\n            return np.float32(self.test[\n                              index // self.step * self.win_size:index // self.step * self.win_size + self.win_size]), np.float32(\n                self.test_labels[index // self.step * self.win_size:index // self.step * self.win_size + self.win_size])\n\n\nclass SMDSegLoader(Dataset):\n    def __init__(self, args, root_path, win_size, step=100, flag=\"train\"):\n        self.flag = flag\n        self.step = step\n        self.win_size = win_size\n        self.scaler = StandardScaler()\n        data = np.load(os.path.join(root_path, \"SMD_train.npy\"))\n        self.scaler.fit(data)\n        data = self.scaler.transform(data)\n        test_data = np.load(os.path.join(root_path, \"SMD_test.npy\"))\n        self.test = self.scaler.transform(test_data)\n        self.train = data\n        data_len = len(self.train)\n        self.val = self.train[(int)(data_len * 0.8):]\n        self.test_labels = np.load(os.path.join(root_path, \"SMD_test_label.npy\"))\n\n    def __len__(self):\n        if self.flag == \"train\":\n            return (self.train.shape[0] - self.win_size) // self.step + 1\n        elif (self.flag == 'val'):\n            return (self.val.shape[0] - self.win_size) // self.step + 1\n        elif (self.flag == 'test'):\n     
       return (self.test.shape[0] - self.win_size) // self.step + 1\n        else:\n            return (self.test.shape[0] - self.win_size) // self.win_size + 1\n\n    def __getitem__(self, index):\n        index = index * self.step\n        if self.flag == \"train\":\n            return np.float32(self.train[index:index + self.win_size]), np.float32(self.test_labels[0:self.win_size])\n        elif (self.flag == 'val'):\n            return np.float32(self.val[index:index + self.win_size]), np.float32(self.test_labels[0:self.win_size])\n        elif (self.flag == 'test'):\n            return np.float32(self.test[index:index + self.win_size]), np.float32(\n                self.test_labels[index:index + self.win_size])\n        else:\n            return np.float32(self.test[\n                              index // self.step * self.win_size:index // self.step * self.win_size + self.win_size]), np.float32(\n                self.test_labels[index // self.step * self.win_size:index // self.step * self.win_size + self.win_size])\n\n\nclass SWATSegLoader(Dataset):\n    def __init__(self, args, root_path, win_size, step=1, flag=\"train\"):\n        self.flag = flag\n        self.step = step\n        self.win_size = win_size\n        self.scaler = StandardScaler()\n\n        train_data = pd.read_csv(os.path.join(root_path, 'swat_train2.csv'))\n        test_data = pd.read_csv(os.path.join(root_path, 'swat2.csv'))\n        labels = test_data.values[:, -1:]\n        train_data = train_data.values[:, :-1]\n        test_data = test_data.values[:, :-1]\n\n        self.scaler.fit(train_data)\n        train_data = self.scaler.transform(train_data)\n        test_data = self.scaler.transform(test_data)\n        self.train = train_data\n        self.test = test_data\n        data_len = len(self.train)\n        self.val = self.train[(int)(data_len * 0.8):]\n        self.test_labels = labels\n        print(\"test:\", self.test.shape)\n        print(\"train:\", self.train.shape)\n\n    def 
__len__(self):\n        \"\"\"\n        Number of images in the object dataset.\n        \"\"\"\n        if self.flag == \"train\":\n            return (self.train.shape[0] - self.win_size) // self.step + 1\n        elif (self.flag == 'val'):\n            return (self.val.shape[0] - self.win_size) // self.step + 1\n        elif (self.flag == 'test'):\n            return (self.test.shape[0] - self.win_size) // self.step + 1\n        else:\n            return (self.test.shape[0] - self.win_size) // self.win_size + 1\n\n    def __getitem__(self, index):\n        index = index * self.step\n        if self.flag == \"train\":\n            return np.float32(self.train[index:index + self.win_size]), np.float32(self.test_labels[0:self.win_size])\n        elif (self.flag == 'val'):\n            return np.float32(self.val[index:index + self.win_size]), np.float32(self.test_labels[0:self.win_size])\n        elif (self.flag == 'test'):\n            return np.float32(self.test[index:index + self.win_size]), np.float32(\n                self.test_labels[index:index + self.win_size])\n        else:\n            return np.float32(self.test[\n                              index // self.step * self.win_size:index // self.step * self.win_size + self.win_size]), np.float32(\n                self.test_labels[index // self.step * self.win_size:index // self.step * self.win_size + self.win_size])\n\n\nclass UEAloader(Dataset):\n    \"\"\"\n    Dataset class for datasets included in:\n        Time Series Classification Archive (www.timeseriesclassification.com)\n    Argument:\n        limit_size: float in (0, 1) for debug\n    Attributes:\n        all_df: (num_samples * seq_len, num_columns) dataframe indexed by integer indices, with multiple rows corresponding to the same index (sample).\n            Each row is a time step; Each column contains either metadata (e.g. 
timestamp) or a feature.\n        feature_df: (num_samples * seq_len, feat_dim) dataframe; contains the subset of columns of `all_df` which correspond to selected features\n        feature_names: names of columns contained in `feature_df` (same as feature_df.columns)\n        all_IDs: (num_samples,) series of IDs contained in `all_df`/`feature_df` (same as all_df.index.unique() )\n        labels_df: (num_samples, num_labels) pd.DataFrame of label(s) for each sample\n        max_seq_len: maximum sequence (time series) length. If None, script argument `max_seq_len` will be used.\n            (Moreover, script argument overrides this attribute)\n    \"\"\"\n\n    def __init__(self, args, root_path, file_list=None, limit_size=None, flag=None):\n        self.args = args\n        self.root_path = root_path\n        self.flag = flag\n        self.all_df, self.labels_df = self.load_all(root_path, file_list=file_list, flag=flag)\n        self.all_IDs = self.all_df.index.unique()  # all sample IDs (integer indices 0 ... 
num_samples-1)\n\n        if limit_size is not None:\n            if limit_size > 1:\n                limit_size = int(limit_size)\n            else:  # interpret as proportion if in (0, 1]\n                limit_size = int(limit_size * len(self.all_IDs))\n            self.all_IDs = self.all_IDs[:limit_size]\n            self.all_df = self.all_df.loc[self.all_IDs]\n\n        # use all features\n        self.feature_names = self.all_df.columns\n        self.feature_df = self.all_df\n\n        # pre_process\n        normalizer = Normalizer()\n        self.feature_df = normalizer.normalize(self.feature_df)\n        print(len(self.all_IDs))\n\n    def load_all(self, root_path, file_list=None, flag=None):\n        \"\"\"\n        Loads datasets from csv files contained in `root_path` into a dataframe, optionally choosing from `pattern`\n        Args:\n            root_path: directory containing all individual .csv files\n            file_list: optionally, provide a list of file paths within `root_path` to consider.\n                Otherwise, entire `root_path` contents will be used.\n        Returns:\n            all_df: a single (possibly concatenated) dataframe with all data corresponding to specified files\n            labels_df: dataframe containing label(s) for each sample\n        \"\"\"\n        # Select paths for training and evaluation\n        if file_list is None:\n            data_paths = glob.glob(os.path.join(root_path, '*'))  # list of all paths\n        else:\n            data_paths = [os.path.join(root_path, p) for p in file_list]\n        if len(data_paths) == 0:\n            raise Exception('No files found using: {}'.format(os.path.join(root_path, '*')))\n        if flag is not None:\n            data_paths = list(filter(lambda x: re.search(flag, x), data_paths))\n        input_paths = [p for p in data_paths if os.path.isfile(p) and p.endswith('.ts')]\n        if len(input_paths) == 0:\n            pattern='*.ts'\n            raise Exception(\"No .ts 
files found using pattern: '{}'\".format(pattern))\n\n        all_df, labels_df = self.load_single(input_paths[0])  # a single file contains dataset\n\n        return all_df, labels_df\n\n    def load_single(self, filepath):\n        df, labels = load_from_tsfile_to_dataframe(filepath, return_separate_X_and_y=True,\n                                                             replace_missing_vals_with='NaN')\n        labels = pd.Series(labels, dtype=\"category\")\n        self.class_names = labels.cat.categories\n        labels_df = pd.DataFrame(labels.cat.codes,\n                                 dtype=np.int8)  # int8-32 gives an error when using nn.CrossEntropyLoss\n\n        lengths = df.applymap(\n            lambda x: len(x)).values  # (num_samples, num_dimensions) array containing the length of each series\n\n        horiz_diffs = np.abs(lengths - np.expand_dims(lengths[:, 0], -1))\n\n        if np.sum(horiz_diffs) > 0:  # if any row (sample) has varying length across dimensions\n            df = df.applymap(subsample)\n\n        lengths = df.applymap(lambda x: len(x)).values\n        vert_diffs = np.abs(lengths - np.expand_dims(lengths[0, :], 0))\n        if np.sum(vert_diffs) > 0:  # if any column (dimension) has varying length across samples\n            self.max_seq_len = int(np.max(lengths[:, 0]))\n        else:\n            self.max_seq_len = lengths[0, 0]\n\n        # First create a (seq_len, feat_dim) dataframe for each sample, indexed by a single integer (\"ID\" of the sample)\n        # Then concatenate into a (num_samples * seq_len, feat_dim) dataframe, with multiple rows corresponding to the\n        # sample index (i.e. 
the same scheme as all datasets in this project)\n\n        df = pd.concat((pd.DataFrame({col: df.loc[row, col] for col in df.columns}).reset_index(drop=True).set_index(\n            pd.Series(lengths[row, 0] * [row])) for row in range(df.shape[0])), axis=0)\n\n        # Replace NaN values\n        grp = df.groupby(by=df.index)\n        df = grp.transform(interpolate_missing)\n\n        return df, labels_df\n\n    def instance_norm(self, case):\n        if self.root_path.count('EthanolConcentration') > 0:  # special process for numerical stability\n            mean = case.mean(0, keepdim=True)\n            case = case - mean\n            stdev = torch.sqrt(torch.var(case, dim=1, keepdim=True, unbiased=False) + 1e-5)\n            case /= stdev\n            return case\n        else:\n            return case\n\n    def __getitem__(self, ind):\n        batch_x = self.feature_df.loc[self.all_IDs[ind]].values\n        labels = self.labels_df.loc[self.all_IDs[ind]].values\n        if self.flag == \"TRAIN\" and self.args.augmentation_ratio > 0:\n            num_samples = len(self.all_IDs)\n            num_columns = self.feature_df.shape[1]\n            seq_len = int(self.feature_df.shape[0] / num_samples)\n            batch_x = batch_x.reshape((1, seq_len, num_columns))\n            batch_x, labels, augmentation_tags = run_augmentation_single(batch_x, labels, self.args)\n\n            batch_x = batch_x.reshape((1 * seq_len, num_columns))\n\n        return self.instance_norm(torch.from_numpy(batch_x)), \\\n               torch.from_numpy(labels)\n\n    def __len__(self):\n        return len(self.all_IDs)\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/data_provider/data_loader_tempo.py",
    "content": "import numpy as np\nimport pandas as pd\nimport os\nimport torch\nfrom torch.utils.data import Dataset\nfrom sklearn.preprocessing import StandardScaler\nfrom Other_baselines.utils.timefeatures import time_features\nfrom Other_baselines.utils.tools import convert_tsf_to_dataframe\nimport warnings\nimport pickle\nfrom statsmodels.tsa.seasonal import STL\n\nwarnings.filterwarnings('ignore')\n\nstl_position = 'stl/'\n\n\nclass Dataset_ETT_hour(Dataset):\n    def __init__(self, root_path, flag='train', size=None,\n                 features='S', data_path='ETTh1.csv',\n                 target='OT', scale=True, timeenc=0, freq='h',\n                 percent=100, data_name='etth2', max_len=-1, train_all=False):\n        # size [seq_len, label_len, pred_len]\n        # info\n        if size == None:\n            self.seq_len = 24 * 4 * 4\n            self.label_len = 24 * 4\n            self.pred_len = 24 * 4\n        else:\n            self.seq_len = size[0]\n            self.label_len = size[1]\n            self.pred_len = size[2]\n        # init\n        assert flag in ['train', 'test', 'val']\n        type_map = {'train': 0, 'val': 1, 'test': 2}\n        self.set_type = type_map[flag]\n\n        self.percent = percent\n        self.features = features\n        self.target = target\n        self.scale = scale\n        self.timeenc = timeenc\n        self.freq = freq\n\n        self.root_path = root_path\n        self.data_path = data_path\n        self.data_name = data_name\n        self.__read_data__()\n\n        self.enc_in = self.data_x.shape[-1]\n        print(\"self.enc_in = {}\".format(self.enc_in))\n        print(\"self.data_x = {}\".format(self.data_x.shape))\n        self.tot_len = len(self.data_x) - self.seq_len - self.pred_len + 1\n\n    def stl_resolve(self, data_raw, data_name):\n        \"\"\"\n        STL Global Decomposition\n        \"\"\"\n        # self.data_name = 'etth1'\n        self.data_name = data_name\n        save_stl = 
stl_position + self.data_name\n        # save_stl = 'stl/' + 'weather'\n\n        self.save_stl = save_stl\n        trend_pk = self.save_stl + '/trend.pk'\n        seasonal_pk = self.save_stl + '/seasonal.pk'\n        resid_pk = self.save_stl + '/resid.pk'\n        if os.path.isfile(trend_pk) and os.path.isfile(seasonal_pk) and os.path.isfile(resid_pk):\n            with open(trend_pk, 'rb') as f:\n                trend_stamp = pickle.load(f)\n            with open(seasonal_pk, 'rb') as f:\n                seasonal_stamp = pickle.load(f)\n            with open(resid_pk, 'rb') as f:\n                resid_stamp = pickle.load(f)\n        else:\n            os.makedirs(self.save_stl, exist_ok=True)\n            data_raw['date'] = pd.to_datetime(data_raw['date'])\n            data_raw.set_index('date', inplace=True)\n\n            [n, m] = data_raw.shape\n\n            trend_stamp = torch.zeros([len(data_raw), m], dtype=torch.float32)\n            seasonal_stamp = torch.zeros([len(data_raw), m], dtype=torch.float32)\n            resid_stamp = torch.zeros([len(data_raw), m], dtype=torch.float32)\n\n            cols = data_raw.columns\n            for i, col in enumerate(cols):\n                df = data_raw[col]\n                # df = df.resample(self.args.freq).mean().ffill()\n                if 'weather' in self.data_name:  # == 'weather':\n                    res = STL(df, period=24 * 6).fit()\n                elif 'ill' in self.data_name:  # == :\n                    res = STL(df, period=7).fit()\n                elif 'etth1' in self.data_name or 'etth2' in self.data_name:\n                    res = STL(df, period=24).fit()\n                else:\n                    res = STL(df, period=24 * 2).fit()\n\n                trend_stamp[:, i] = torch.tensor(np.array(res.trend.values), dtype=torch.float32)\n                seasonal_stamp[:, i] = torch.tensor(np.array(res.seasonal.values), dtype=torch.float32)\n                resid_stamp[:, i] = 
torch.tensor(np.array(res.resid.values), dtype=torch.float32)\n            with open(trend_pk, 'wb') as f:\n                pickle.dump(trend_stamp, f)\n            with open(seasonal_pk, 'wb') as f:\n                pickle.dump(seasonal_stamp, f)\n            with open(resid_pk, 'wb') as f:\n                pickle.dump(resid_stamp, f)\n        return trend_stamp, seasonal_stamp, resid_stamp\n\n    def __read_data__(self):\n        self.scaler = StandardScaler()\n        df_raw = pd.read_csv(os.path.join(self.root_path,\n                                          self.data_path))\n\n        # border1s = [0, 12 * 30 * 24 - self.seq_len, 12 * 30 * 24 + 4 * 30 * 24 - self.seq_len]\n        border1s = [0, 12 * 30 * 24, 12 * 30 * 24 + 4 * 30 * 24]\n        border2s = [12 * 30 * 24, 12 * 30 * 24 + 4 * 30 * 24, 12 * 30 * 24 + 8 * 30 * 24]\n        border1 = border1s[self.set_type]\n        border2 = border2s[self.set_type]\n\n        if self.set_type == 0:\n            border2 = (border2 - self.seq_len) * self.percent // 100 + self.seq_len\n\n        if self.features == 'M' or self.features == 'MS':\n            cols_data = df_raw.columns[1:]\n            df_data = df_raw[cols_data]\n        elif self.features == 'S':\n            df_data = df_raw[[self.target]]\n\n        if self.scale:\n            train_data = df_data[border1s[0]:border2s[0]]\n            self.scaler.fit(train_data.values)\n            data = self.scaler.transform(df_data.values)\n        else:\n            data = df_data.values\n\n        df_stamp = df_raw[['date']][border1:border2]\n        df_stamp['date'] = pd.to_datetime(df_stamp.date)\n\n        # After we get data, we do the stl resolve\n        col_date = df_raw.columns[:1]\n        df_time = df_raw[col_date]\n        data_raw = pd.DataFrame.join(df_time, pd.DataFrame(data))  # [border1:border2]\n        trend_stamp, seasonal_stamp, resid_stamp = self.stl_resolve(data_raw=data_raw, data_name=self.data_name)\n        # end -dove\n\n        if 
self.timeenc == 0:\n            df_stamp['month'] = df_stamp.date.apply(lambda row: row.month, 1)\n            df_stamp['day'] = df_stamp.date.apply(lambda row: row.day, 1)\n            df_stamp['weekday'] = df_stamp.date.apply(lambda row: row.weekday(), 1)\n            df_stamp['hour'] = df_stamp.date.apply(lambda row: row.hour, 1)\n            data_stamp = df_stamp.drop(['date'], 1).values\n        elif self.timeenc == 1:\n            data_stamp = time_features(pd.to_datetime(df_stamp['date'].values), freq=self.freq)\n            data_stamp = data_stamp.transpose(1, 0)\n\n        self.data_x = data[border1:border2]\n        self.data_y = data[border1:border2]\n        self.data_stamp = data_stamp\n\n        self.trend_stamp = trend_stamp[border1:border2]\n        self.seasonal_stamp = seasonal_stamp[border1:border2]\n        self.resid_stamp = resid_stamp[border1:border2]\n\n    def __getitem__(self, index):\n        feat_id = index // self.tot_len\n        s_begin = index % self.tot_len\n\n        s_end = s_begin + self.seq_len\n        r_begin = s_end - self.label_len\n        r_end = r_begin + self.label_len + self.pred_len\n        seq_x = self.data_x[s_begin:s_end, feat_id:feat_id + 1]\n        seq_y = self.data_y[r_begin:r_end, feat_id:feat_id + 1]\n        seq_x_mark = self.data_stamp[s_begin:s_end]\n        seq_y_mark = self.data_stamp[r_begin:r_end]\n        seq_trend = self.trend_stamp[s_begin:s_end, feat_id:feat_id + 1]\n        seq_seasonal = self.seasonal_stamp[s_begin:s_end, feat_id:feat_id + 1]\n        seq_resid = self.resid_stamp[s_begin:s_end, feat_id:feat_id + 1]\n\n        return seq_x, seq_y, seq_x_mark, seq_y_mark, seq_trend, seq_seasonal, seq_resid\n\n    def __len__(self):\n        return (len(self.data_x) - self.seq_len - self.pred_len + 1) * self.enc_in\n\n    def inverse_transform(self, data):\n        return self.scaler.inverse_transform(data)\n\n\nclass Dataset_ETT_minute(Dataset):\n    def __init__(self, root_path, flag='train', 
size=None,\n                 features='S', data_path='ETTm1.csv',\n                 target='OT', scale=True, timeenc=0, freq='t',\n                 percent=100, max_len=-1, data_name='ettm2', train_all=False):\n        # size [seq_len, label_len, pred_len]\n        # info\n        if size == None:\n            self.seq_len = 24 * 4 * 4\n            self.label_len = 24 * 4\n            self.pred_len = 24 * 4\n        else:\n            self.seq_len = size[0]\n            self.label_len = size[1]\n            self.pred_len = size[2]\n        # init\n        assert flag in ['train', 'test', 'val']\n        type_map = {'train': 0, 'val': 1, 'test': 2}\n        self.set_type = type_map[flag]\n\n        self.features = features\n        self.target = target\n        self.scale = scale\n        self.timeenc = timeenc\n        self.freq = freq\n        self.percent = percent\n\n        self.root_path = root_path\n        self.data_path = data_path\n        self.data_name = data_name\n        self.__read_data__()\n\n        self.enc_in = self.data_x.shape[-1]\n        self.tot_len = len(self.data_x) - self.seq_len - self.pred_len + 1\n\n    def stl_resolve(self, data_raw, data_name):\n        \"\"\"\n        STL Global Decomposition\n        \"\"\"\n        # self.data_name = 'etth1'\n        self.data_name = data_name\n        save_stl = stl_position + self.data_name\n        # save_stl = 'stl/' + 'weather'\n\n        self.save_stl = save_stl\n        trend_pk = self.save_stl + '/trend.pk'\n        seasonal_pk = self.save_stl + '/seasonal.pk'\n        resid_pk = self.save_stl + '/resid.pk'\n        if os.path.isfile(trend_pk) and os.path.isfile(seasonal_pk) and os.path.isfile(resid_pk):\n            with open(trend_pk, 'rb') as f:\n                trend_stamp = pickle.load(f)\n            with open(seasonal_pk, 'rb') as f:\n                seasonal_stamp = pickle.load(f)\n            with open(resid_pk, 'rb') as f:\n                resid_stamp = pickle.load(f)\n        
else:\n            os.makedirs(self.save_stl, exist_ok=True)\n            data_raw['date'] = pd.to_datetime(data_raw['date'])\n            data_raw.set_index('date', inplace=True)\n\n            [n, m] = data_raw.shape\n\n            trend_stamp = torch.zeros([len(data_raw), m], dtype=torch.float32)\n            seasonal_stamp = torch.zeros([len(data_raw), m], dtype=torch.float32)\n            resid_stamp = torch.zeros([len(data_raw), m], dtype=torch.float32)\n\n            cols = data_raw.columns\n            for i, col in enumerate(cols):\n                df = data_raw[col]\n                # df = df.resample(self.args.freq).mean().ffill()\n                if 'weather' in self.data_name:  # == 'weather':\n                    res = STL(df, period=24 * 6).fit()\n                elif 'ill' in self.data_name:\n                    res = STL(df).fit()  # , period = 7 52？\n                elif 'etth1' in self.data_name or 'etth2' in self.data_name:\n                    res = STL(df, period=24).fit()\n                elif 'ettm1' in self.data_name or 'ettm2' in self.data_name:\n                    res = STL(df, period=24 * 4).fit()\n                else:\n                    res = STL(df).fit()\n\n                trend_stamp[:, i] = torch.tensor(np.array(res.trend.values), dtype=torch.float32)\n                seasonal_stamp[:, i] = torch.tensor(np.array(res.seasonal.values), dtype=torch.float32)\n                resid_stamp[:, i] = torch.tensor(np.array(res.resid.values), dtype=torch.float32)\n            with open(trend_pk, 'wb') as f:\n                pickle.dump(trend_stamp, f)\n            with open(seasonal_pk, 'wb') as f:\n                pickle.dump(seasonal_stamp, f)\n            with open(resid_pk, 'wb') as f:\n                pickle.dump(resid_stamp, f)\n        return trend_stamp, seasonal_stamp, resid_stamp\n\n    def __read_data__(self):\n        self.scaler = StandardScaler()\n        df_raw = pd.read_csv(os.path.join(self.root_path,\n                      
                    self.data_path))\n\n        # border1s = [0, 12 * 30 * 24 * 4 - self.seq_len, 12 * 30 * 24 * 4 + 4 * 30 * 24 * 4 - self.seq_len]\n        border1s = [0, 12 * 30 * 24 * 4, 12 * 30 * 24 * 4 + 4 * 30 * 24 * 4]\n        border2s = [12 * 30 * 24 * 4, 12 * 30 * 24 * 4 + 4 * 30 * 24 * 4, 12 * 30 * 24 * 4 + 8 * 30 * 24 * 4]\n        border1 = border1s[self.set_type]\n        border2 = border2s[self.set_type]\n        if self.set_type == 0:\n            border2 = (border2 - self.seq_len) * self.percent // 100 + self.seq_len\n\n        if self.features == 'M' or self.features == 'MS':\n            cols_data = df_raw.columns[1:]\n            df_data = df_raw[cols_data]\n        elif self.features == 'S':\n            df_data = df_raw[[self.target]]\n\n        if self.scale:\n            train_data = df_data[border1s[0]:border2s[0]]\n            self.scaler.fit(train_data.values)\n            data = self.scaler.transform(df_data.values)\n        else:\n            data = df_data.values\n\n        df_stamp = df_raw[['date']][border1:border2]\n        df_stamp['date'] = pd.to_datetime(df_stamp.date)\n        if self.timeenc == 0:\n            df_stamp['month'] = df_stamp.date.apply(lambda row: row.month, 1)\n            df_stamp['day'] = df_stamp.date.apply(lambda row: row.day, 1)\n            df_stamp['weekday'] = df_stamp.date.apply(lambda row: row.weekday(), 1)\n            df_stamp['hour'] = df_stamp.date.apply(lambda row: row.hour, 1)\n            df_stamp['minute'] = df_stamp.date.apply(lambda row: row.minute, 1)\n            df_stamp['minute'] = df_stamp.minute.map(lambda x: x // 15)\n            data_stamp = df_stamp.drop(['date'], 1).values\n        elif self.timeenc == 1:\n            data_stamp = time_features(pd.to_datetime(df_stamp['date'].values), freq=self.freq)\n            data_stamp = data_stamp.transpose(1, 0)\n\n        # After we get data, we do the stl resolve\n        col_date = df_raw.columns[:1]\n        df_time = df_raw[col_date]\n   
     data_raw = pd.DataFrame.join(df_time, pd.DataFrame(data))  # [border1:border2]\n        trend_stamp, seasonal_stamp, resid_stamp = self.stl_resolve(data_raw=data_raw, data_name=self.data_name)\n\n        self.data_x = data[border1:border2]\n        self.data_y = data[border1:border2]\n        self.data_stamp = data_stamp\n        self.trend_stamp = trend_stamp[border1:border2]\n        self.seasonal_stamp = seasonal_stamp[border1:border2]\n        self.resid_stamp = resid_stamp[border1:border2]\n\n    def __getitem__(self, index):\n        feat_id = index // self.tot_len\n        s_begin = index % self.tot_len\n\n        s_end = s_begin + self.seq_len\n        r_begin = s_end - self.label_len\n        r_end = r_begin + self.label_len + self.pred_len\n        seq_x = self.data_x[s_begin:s_end, feat_id:feat_id + 1]\n        seq_y = self.data_y[r_begin:r_end, feat_id:feat_id + 1]\n        seq_x_mark = self.data_stamp[s_begin:s_end]\n        seq_y_mark = self.data_stamp[r_begin:r_end]\n        seq_trend = self.trend_stamp[s_begin:s_end, feat_id:feat_id + 1]\n        seq_seasonal = self.seasonal_stamp[s_begin:s_end, feat_id:feat_id + 1]\n        seq_resid = self.resid_stamp[s_begin:s_end, feat_id:feat_id + 1]\n\n        return seq_x, seq_y, seq_x_mark, seq_y_mark, seq_trend, seq_seasonal, seq_resid\n\n    def __len__(self):\n        return (len(self.data_x) - self.seq_len - self.pred_len + 1) * self.enc_in\n\n    def inverse_transform(self, data):\n        return self.scaler.inverse_transform(data)\n\n\nclass Dataset_Custom(Dataset):\n    def __init__(self, root_path, flag='train', size=None,\n                 features='S', data_path='ETTh1.csv',\n                 target='OT', scale=True, timeenc=0, freq='h',\n                 percent=10, data_name='weather', max_len=-1, train_all=False):\n        # size [seq_len, label_len, pred_len]\n        # info\n        if size == None:\n            self.seq_len = 24 * 4 * 4\n            self.label_len = 24 * 4\n            
self.pred_len = 24 * 4\n        else:\n            self.seq_len = size[0]\n            self.label_len = size[1]\n            self.pred_len = size[2]\n        # init\n        assert flag in ['train', 'test', 'val']\n        type_map = {'train': 0, 'val': 1, 'test': 2}\n        self.set_type = type_map[flag]\n\n        self.features = features\n        self.target = target\n        self.scale = scale\n        self.timeenc = timeenc\n        self.freq = freq\n        self.percent = percent\n\n        self.root_path = root_path\n        self.data_path = data_path\n        self.data_name = data_name\n        self.__read_data__()\n\n        self.enc_in = self.data_x.shape[-1]\n        self.tot_len = len(self.data_x) - self.seq_len - self.pred_len + 1\n        # self.save_stl = 'stl/'\n\n    def stl_resolve(self, data_raw):\n        \"\"\"\n        STL Global Decomposition\n        \"\"\"\n\n        save_stl = stl_position + self.data_name\n        # save_stl = 'stl/' + 'weather'\n\n        self.save_stl = save_stl\n        trend_pk = self.save_stl + '/trend.pk'\n        seasonal_pk = self.save_stl + '/seasonal.pk'\n        resid_pk = self.save_stl + '/resid.pk'\n        if os.path.isfile(trend_pk) and os.path.isfile(seasonal_pk) and os.path.isfile(resid_pk):\n            with open(trend_pk, 'rb') as f:\n                trend_stamp = pickle.load(f)\n            with open(seasonal_pk, 'rb') as f:\n                seasonal_stamp = pickle.load(f)\n            with open(resid_pk, 'rb') as f:\n                resid_stamp = pickle.load(f)\n        else:\n            os.makedirs(self.save_stl, exist_ok=True)\n            data_raw['date'] = pd.to_datetime(data_raw['date'])\n            data_raw.set_index('date', inplace=True)\n\n            [n, m] = data_raw.shape\n\n            trend_stamp = torch.zeros([len(data_raw), m], dtype=torch.float32)\n            seasonal_stamp = torch.zeros([len(data_raw), m], dtype=torch.float32)\n            resid_stamp = torch.zeros([len(data_raw), 
m], dtype=torch.float32)\n\n            cols = data_raw.columns\n            for i, col in enumerate(cols):\n                df = data_raw[col]\n                # df = df.resample(self.args.freq).mean().ffill()\n\n                if 'weather' in self.data_name:  # == 'weather':\n                    res = STL(df, period=24 * 6).fit()\n                elif 'ill' in self.data_name:\n                    res = STL(df).fit()  # , period = 7 52？\n                elif 'etth1' in self.data_name or 'etth2' in self.data_name:\n                    res = STL(df, period=24).fit()\n                elif 'ettm1' in self.data_name or 'ettm2' in self.data_name:\n                    res = STL(df, period=24 * 4).fit()\n                elif 'traffic' in self.data_name or 'electricity' in self.data_name:\n                    res = STL(df, period=24).fit()\n                else:\n                    res = STL(df).fit()\n\n                trend_stamp[:, i] = torch.tensor(np.array(res.trend.values), dtype=torch.float32)\n                seasonal_stamp[:, i] = torch.tensor(np.array(res.seasonal.values), dtype=torch.float32)\n                resid_stamp[:, i] = torch.tensor(np.array(res.resid.values), dtype=torch.float32)\n            with open(trend_pk, 'wb') as f:\n                pickle.dump(trend_stamp, f)\n            with open(seasonal_pk, 'wb') as f:\n                pickle.dump(seasonal_stamp, f)\n            with open(resid_pk, 'wb') as f:\n                pickle.dump(resid_stamp, f)\n        return trend_stamp, seasonal_stamp, resid_stamp\n\n    def __read_data__(self):\n        self.scaler = StandardScaler()\n        df_raw = pd.read_csv(os.path.join(self.root_path,\n                                          self.data_path))\n\n        '''\n        df_raw.columns: ['date', ...(other features), target feature]\n        '''\n        cols = list(df_raw.columns)\n        cols.remove(self.target)\n        cols.remove('date')\n        df_raw = df_raw[['date'] + cols + [self.target]]\n\n  
      if self.data_path == 'electricity.csv':\n            num_train = int(len(df_raw) * 0.6)\n            num_test = int(len(df_raw) * 0.2) + 1\n            num_vali = len(df_raw) - num_train - num_test\n            border1s = [0, num_train, len(df_raw) - num_test]\n\n            border2s = [num_train, num_train + num_vali, len(df_raw)]\n            border1 = border1s[self.set_type]\n            border2 = border2s[self.set_type]\n\n        else:\n            # print(cols)\n            num_train = int(len(df_raw) * 0.7)\n            num_test = int(len(df_raw) * 0.2)\n            num_vali = len(df_raw) - num_train - num_test\n            border1s = [0, num_train - self.seq_len, len(df_raw) - num_test - self.seq_len]\n\n            border2s = [num_train, num_train + num_vali, len(df_raw)]\n            border1 = border1s[self.set_type]\n            border2 = border2s[self.set_type]\n\n        if self.set_type == 0:\n            border2 = (border2 - self.seq_len) * self.percent // 100 + self.seq_len\n\n        if self.features == 'M' or self.features == 'MS':\n            cols_data = df_raw.columns[1:]\n            df_data = df_raw[cols_data]\n        elif self.features == 'S':\n            df_data = df_raw[[self.target]]\n\n        if self.scale:\n            train_data = df_data[border1s[0]:border2s[0]]\n            self.scaler.fit(train_data.values)\n            data = self.scaler.transform(df_data.values)\n        else:\n            data = df_data.values\n\n        # After we get data, we do the stl resolve\n        col_date = df_raw.columns[:1]\n        df_time = df_raw[col_date]\n        data_raw = pd.DataFrame.join(df_time, pd.DataFrame(data))  # [border1:border2]\n        trend_stamp, seasonal_stamp, resid_stamp = self.stl_resolve(data_raw=data_raw)\n\n        df_stamp = df_raw[['date']][border1:border2]\n        df_stamp['date'] = pd.to_datetime(df_stamp.date)\n        if self.timeenc == 0:\n            df_stamp['month'] = df_stamp.date.apply(lambda row: 
row.month, 1)\n            df_stamp['day'] = df_stamp.date.apply(lambda row: row.day, 1)\n            df_stamp['weekday'] = df_stamp.date.apply(lambda row: row.weekday(), 1)\n            df_stamp['hour'] = df_stamp.date.apply(lambda row: row.hour, 1)\n            data_stamp = df_stamp.drop(['date'], 1).values\n        elif self.timeenc == 1:\n            data_stamp = time_features(pd.to_datetime(df_stamp['date'].values), freq=self.freq)\n            data_stamp = data_stamp.transpose(1, 0)\n\n        self.data_x = data[border1:border2]\n        self.data_y = data[border1:border2]\n        self.trend_stamp = trend_stamp[border1:border2]\n        self.seasonal_stamp = seasonal_stamp[border1:border2]\n        self.resid_stamp = resid_stamp[border1:border2]\n        self.data_stamp = data_stamp\n\n    def __getitem__(self, index):\n        feat_id = index // self.tot_len\n        s_begin = index % self.tot_len\n        # print(\"feat_id = \", feat_id, \", s_begin = \", s_begin)\n\n        # s_begin = index\n\n        s_end = s_begin + self.seq_len\n        r_begin = s_end - self.label_len\n        r_end = r_begin + self.label_len + self.pred_len\n        seq_x = self.data_x[s_begin:s_end, feat_id:feat_id + 1]\n        seq_y = self.data_y[r_begin:r_end, feat_id:feat_id + 1]\n        seq_trend = self.trend_stamp[s_begin:s_end, feat_id:feat_id + 1]\n        seq_seasonal = self.seasonal_stamp[s_begin:s_end, feat_id:feat_id + 1]\n        seq_resid = self.resid_stamp[s_begin:s_end, feat_id:feat_id + 1]\n        seq_x_mark = self.data_stamp[s_begin:s_end]\n        seq_y_mark = self.data_stamp[r_begin:r_end]\n        # print(\"seq_x = \", len(seq_x), len(seq_y), len(seq_y_mark), len(seq_seasonal), len(seq_resid))\n\n        return seq_x, seq_y, seq_x_mark, seq_y_mark, seq_trend, seq_seasonal, seq_resid\n\n    def __len__(self):\n        # return 1000 #(\n        return (len(self.data_x) - self.seq_len - self.pred_len + 1) * self.enc_in\n\n    def inverse_transform(self, 
data):\n        return self.scaler.inverse_transform(data)\n\n\nclass Dataset_Pred(Dataset):\n    def __init__(self, root_path, flag='pred', size=None,\n                 features='S', data_path='ETTh1.csv',\n                 target='OT', scale=True, inverse=False, timeenc=0, freq='15min', cols=None,\n                 percent=None, train_all=False, period=24, max_len=-1, data_name='weather'):\n        # size [seq_len, label_len, pred_len]\n        # info\n        if size == None:\n            self.seq_len = 24 * 4 * 4\n            self.label_len = 24 * 4\n            self.pred_len = 24 * 4\n        else:\n            self.seq_len = size[0]\n            self.label_len = size[1]\n            self.pred_len = size[2]\n        # init\n        assert flag in ['pred']\n\n        self.features = features\n        self.target = target\n        self.scale = scale\n        self.inverse = inverse\n        self.timeenc = timeenc\n        self.freq = freq\n        self.cols = cols\n        self.root_path = root_path\n        self.data_path = data_path\n        self.period = period\n        self.data_name = data_name\n        self.__read_data__()\n\n    def stl_resolve(self, data_raw, period=24):\n        \"\"\"\n        STL Global Decomposition\n        \"\"\"\n\n        save_stl = stl_position + self.data_name\n        # save_stl = 'stl/' + 'weather'\n\n        self.save_stl = save_stl\n        trend_pk = self.save_stl + '/trend.pk'\n        seasonal_pk = self.save_stl + '/seasonal.pk'\n        resid_pk = self.save_stl + '/resid.pk'\n        if os.path.isfile(trend_pk) and os.path.isfile(seasonal_pk) and os.path.isfile(resid_pk):\n            with open(trend_pk, 'rb') as f:\n                trend_stamp = pickle.load(f)\n            with open(seasonal_pk, 'rb') as f:\n                seasonal_stamp = pickle.load(f)\n            with open(resid_pk, 'rb') as f:\n                resid_stamp = pickle.load(f)\n        else:\n            os.makedirs(self.save_stl, exist_ok=True)\n      
      data_raw['date'] = pd.to_datetime(data_raw['date'])\n            data_raw.set_index('date', inplace=True)\n\n            [n, m] = data_raw.shape\n\n            trend_stamp = torch.zeros([len(data_raw), m], dtype=torch.float32)\n            seasonal_stamp = torch.zeros([len(data_raw), m], dtype=torch.float32)\n            resid_stamp = torch.zeros([len(data_raw), m], dtype=torch.float32)\n\n            cols = data_raw.columns\n            for i, col in enumerate(cols):\n                df = data_raw[col]\n                # df = df.resample(self.args.freq).mean().ffill()\n\n                if 'weather' in self.data_name:  # == 'weather':\n                    res = STL(df, period=24 * 6).fit()\n                elif 'ill' in self.data_name:\n                    res = STL(df).fit()  #\n                elif 'etth1' in self.data_name or 'etth2' in self.data_name:\n                    res = STL(df, period=24).fit()\n                elif 'ettm1' in self.data_name or 'ettm2' in self.data_name:\n                    res = STL(df, period=24 * 4).fit()\n                elif 'traffic' in self.data_name or 'electricity' in self.data_name:\n                    res = STL(df, period=24).fit()\n                else:\n                    res = STL(df, period=period).fit()\n\n                trend_stamp[:, i] = torch.tensor(np.array(res.trend.values), dtype=torch.float32)\n                seasonal_stamp[:, i] = torch.tensor(np.array(res.seasonal.values), dtype=torch.float32)\n                resid_stamp[:, i] = torch.tensor(np.array(res.resid.values), dtype=torch.float32)\n            with open(trend_pk, 'wb') as f:\n                pickle.dump(trend_stamp, f)\n            with open(seasonal_pk, 'wb') as f:\n                pickle.dump(seasonal_stamp, f)\n            with open(resid_pk, 'wb') as f:\n                pickle.dump(resid_stamp, f)\n        return trend_stamp, seasonal_stamp, resid_stamp\n\n    def __read_data__(self):\n        self.scaler = StandardScaler()\n        
df_raw = pd.read_csv(os.path.join(self.root_path,\n                                          self.data_path))\n        '''\n        df_raw.columns: ['date', ...(other features), target feature]\n        '''\n        if self.cols:\n            cols = self.cols.copy()\n            cols.remove(self.target)\n        else:\n            cols = list(df_raw.columns)\n            cols.remove(self.target)\n            cols.remove('date')\n        df_raw = df_raw[['date'] + cols + [self.target]]\n        border1 = 0  # len(df_raw) - self.seq_len\n        border2 = int(0.1 * len(df_raw)) - self.seq_len + 1\n\n        if self.features == 'M' or self.features == 'MS':\n            cols_data = df_raw.columns[1:]\n            df_data = df_raw[cols_data]\n        elif self.features == 'S':\n            df_data = df_raw[[self.target]]\n\n        if self.scale:\n            self.scaler.fit(df_data.values)\n            data = self.scaler.transform(df_data.values)\n        else:\n            data = df_data.values\n\n        tmp_stamp = df_raw[['date']][border1:border2]\n        tmp_stamp['date'] = pd.to_datetime(tmp_stamp.date)\n        pred_dates = pd.date_range(tmp_stamp.date.values[-1], periods=self.pred_len + 1, freq=self.freq)\n\n        col_date = df_raw.columns[:1]\n        df_time = df_raw[col_date]\n        data_raw = pd.DataFrame.join(df_time, pd.DataFrame(data))  # [border1:border2]\n        trend_stamp, seasonal_stamp, resid_stamp = self.stl_resolve(data_raw=data_raw, period=self.period)\n\n        df_stamp = pd.DataFrame(columns=['date'])\n        df_stamp.date = list(tmp_stamp.date.values) + list(pred_dates[1:])\n        if self.timeenc == 0:\n            df_stamp['month'] = df_stamp.date.apply(lambda row: row.month, 1)\n            df_stamp['day'] = df_stamp.date.apply(lambda row: row.day, 1)\n            df_stamp['weekday'] = df_stamp.date.apply(lambda row: row.weekday(), 1)\n            df_stamp['hour'] = df_stamp.date.apply(lambda row: row.hour, 1)\n            
df_stamp['minute'] = df_stamp.date.apply(lambda row: row.minute, 1)\n            df_stamp['minute'] = df_stamp.minute.map(lambda x: x // 15)\n            data_stamp = df_stamp.drop(['date'], 1).values\n        elif self.timeenc == 1:\n            data_stamp = time_features(pd.to_datetime(df_stamp['date'].values), freq=self.freq)\n            data_stamp = data_stamp.transpose(1, 0)\n\n        self.data_x = data[border1:border2]\n        self.trend_stamp = trend_stamp[border1:border2]\n        self.seasonal_stamp = seasonal_stamp[border1:border2]\n        self.resid_stamp = resid_stamp[border1:border2]\n        if self.inverse:\n            self.data_y = df_data.values[border1:border2]\n        else:\n            self.data_y = data[border1:border2]\n        self.data_stamp = data_stamp\n\n    def __getitem__(self, index):\n        s_begin = index\n        s_end = s_begin + self.seq_len\n        r_begin = s_end - self.label_len\n        r_end = r_begin + self.label_len + self.pred_len\n\n        seq_x = self.data_x[s_begin:s_end]\n        if self.inverse:\n            seq_y = self.data_x[r_begin:r_begin + self.label_len]\n        else:\n            seq_y = self.data_y[r_begin:r_begin + self.label_len]\n        seq_x_mark = self.data_stamp[s_begin:s_end]\n        seq_y_mark = self.data_stamp[r_begin:r_end]\n        seq_trend = self.trend_stamp[s_begin:s_end]\n        seq_seasonal = self.seasonal_stamp[s_begin:s_end]\n        seq_resid = self.resid_stamp[s_begin:s_end]\n\n        return seq_x.reshape(-1, 1), seq_y.reshape(-1, 1), seq_x_mark, seq_y_mark, seq_trend.reshape(-1,\n                                                                                                     1), seq_seasonal.reshape(\n            -1, 1), seq_resid.reshape(-1, 1)\n\n    def __len__(self):\n        return len(self.data_x) - self.seq_len + 1\n\n    def inverse_transform(self, data):\n        return self.scaler.inverse_transform(data)\n\n\nclass Dataset_TSF(Dataset):\n    def __init__(self, 
root_path, flag='train', size=None,\n                 features='S', data_path=None,\n                 target='OT', scale=True, timeenc=0, freq='Daily',\n                 percent=10, max_len=-1, train_all=False):\n\n        self.train_all = train_all\n\n        self.seq_len = size[0]\n        self.pred_len = size[2]\n        type_map = {'train': 0, 'val': 1, 'test': 2}\n        self.set_type = type_map[flag]\n\n        self.percent = percent\n        self.max_len = max_len\n        if self.max_len == -1:\n            self.max_len = 1e8\n\n        self.root_path = root_path\n        self.data_path = data_path\n        self.timeseries = self.__read_data__()\n\n    def __read_data__(self):\n        df, frequency, forecast_horizon, contain_missing_values, contain_equal_length = convert_tsf_to_dataframe(\n            os.path.join(self.root_path,\n                         self.data_path))\n        self.freq = frequency\n\n        def dropna(x):\n            return x[~np.isnan(x)]\n\n        timeseries = [dropna(ts).astype(np.float32) for ts in df.series_value]\n\n        self.tot_len = 0\n        self.len_seq = []\n        self.seq_id = []\n        for i in range(len(timeseries)):\n            res_len = max(self.pred_len + self.seq_len - timeseries[i].shape[0], 0)\n            pad_zeros = np.zeros(res_len)\n            timeseries[i] = np.hstack([pad_zeros, timeseries[i]])\n\n            _len = timeseries[i].shape[0]\n            train_len = _len - self.pred_len\n            if self.train_all:\n                border1s = [0, 0, train_len - self.seq_len]\n                border2s = [train_len, train_len, _len]\n            else:\n                border1s = [0, train_len - self.seq_len - self.pred_len, train_len - self.seq_len]\n                border2s = [train_len - self.pred_len, train_len, _len]\n            border2s[0] = (border2s[0] - self.seq_len) * self.percent // 100 + self.seq_len\n            # print(\"_len = {}\".format(_len))\n\n            curr_len = 
border2s[self.set_type] - max(border1s[self.set_type], 0) - self.pred_len - self.seq_len + 1\n            curr_len = max(0, curr_len)\n\n            self.len_seq.append(np.zeros(curr_len) + self.tot_len)\n            self.seq_id.append(np.zeros(curr_len) + i)\n            self.tot_len += curr_len\n\n        self.len_seq = np.hstack(self.len_seq)\n        self.seq_id = np.hstack(self.seq_id)\n\n        return timeseries\n\n    def __getitem__(self, index):\n        len_seq = self.len_seq[index]\n        seq_id = int(self.seq_id[index])\n        index = index - int(len_seq)\n\n        _len = self.timeseries[seq_id].shape[0]\n        train_len = _len - self.pred_len\n        if self.train_all:\n            border1s = [0, 0, train_len - self.seq_len]\n            border2s = [train_len, train_len, _len]\n        else:\n            border1s = [0, train_len - self.seq_len - self.pred_len, train_len - self.seq_len]\n            border2s = [train_len - self.pred_len, train_len, _len]\n        border2s[0] = (border2s[0] - self.seq_len) * self.percent // 100 + self.seq_len\n\n        s_begin = index + border1s[self.set_type]\n        s_end = s_begin + self.seq_len\n        r_begin = s_end\n        r_end = r_begin + self.pred_len\n        if self.set_type == 2:\n            s_end = -self.pred_len\n\n        data_x = self.timeseries[seq_id][s_begin:s_end]\n        data_y = self.timeseries[seq_id][r_begin:r_end]\n        data_x = np.expand_dims(data_x, axis=-1)\n        data_y = np.expand_dims(data_y, axis=-1)\n        # if self.set_type == 2:\n        #     print(\"data_x.shape = {}, data_y.shape = {}\".format(data_x.shape, data_y.shape))\n\n        return data_x, data_y, data_x, data_y\n\n    def __len__(self):\n        if self.set_type == 0:\n            # return self.tot_len\n            return min(self.max_len, self.tot_len)\n        else:\n            return self.tot_len\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/data_provider/m4.py",
    "content": "# This source code is provided for the purposes of scientific reproducibility\n# under the following limited license from Element AI Inc. The code is an\n# implementation of the N-BEATS model (Oreshkin et al., N-BEATS: Neural basis\n# expansion analysis for interpretable time series forecasting,\n# https://arxiv.org/abs/1905.10437). The copyright to the source code is\n# licensed under the Creative Commons - Attribution-NonCommercial 4.0\n# International license (CC BY-NC 4.0):\n# https://creativecommons.org/licenses/by-nc/4.0/.  Any commercial use (whether\n# for the benefit of third parties or internally in production) requires an\n# explicit license. The subject-matter of the N-BEATS model and associated\n# materials are the property of Element AI Inc. and may be subject to patent\n# protection. No license to patents is granted hereunder (whether express or\n# implied). Copyright © 2020 Element AI Inc. All rights reserved.\n\n\"\"\"\nM4 Dataset\n\"\"\"\nimport logging\nimport os\nfrom collections import OrderedDict\nfrom dataclasses import dataclass\nfrom glob import glob\n\nimport numpy as np\nimport pandas as pd\n# import patoolib\nfrom tqdm import tqdm\nimport logging\nimport os\nimport pathlib\nimport sys\nfrom urllib import request\n\n\ndef url_file_name(url: str) -> str:\n    \"\"\"\n    Extract file name from url.\n\n    :param url: URL to extract file name from.\n    :return: File name.\n    \"\"\"\n    return url.split('/')[-1] if len(url) > 0 else ''\n\n\ndef download(url: str, file_path: str) -> None:\n    \"\"\"\n    Download a file to the given path.\n\n    :param url: URL to download\n    :param file_path: Where to download the content.\n    \"\"\"\n\n    def progress(count, block_size, total_size):\n        progress_pct = float(count * block_size) / float(total_size) * 100.0\n        sys.stdout.write('\\rDownloading {} to {} {:.1f}%'.format(url, file_path, progress_pct))\n        sys.stdout.flush()\n\n    if not 
os.path.isfile(file_path):\n        opener = request.build_opener()\n        opener.addheaders = [('User-agent', 'Mozilla/5.0')]\n        request.install_opener(opener)\n        pathlib.Path(os.path.dirname(file_path)).mkdir(parents=True, exist_ok=True)\n        f, _ = request.urlretrieve(url, file_path, progress)\n        sys.stdout.write('\\n')\n        sys.stdout.flush()\n        file_info = os.stat(f)\n        logging.info(f'Successfully downloaded {os.path.basename(file_path)} {file_info.st_size} bytes.')\n    else:\n        file_info = os.stat(file_path)\n        logging.info(f'File already exists: {file_path} {file_info.st_size} bytes.')\n\n\n@dataclass()\nclass M4Dataset:\n    ids: np.ndarray\n    groups: np.ndarray\n    frequencies: np.ndarray\n    horizons: np.ndarray\n    values: np.ndarray\n\n    @staticmethod\n    def load(training: bool = True, dataset_file: str = '../dataset/m4') -> 'M4Dataset':\n        \"\"\"\n        Load cached dataset.\n\n        :param training: Load training part if training is True, test part otherwise.\n        \"\"\"\n        info_file = os.path.join(dataset_file, 'M4-info.csv')\n        train_cache_file = os.path.join(dataset_file, 'training.npz')\n        test_cache_file = os.path.join(dataset_file, 'test.npz')\n        m4_info = pd.read_csv(info_file)\n        return M4Dataset(ids=m4_info.M4id.values,\n                         groups=m4_info.SP.values,\n                         frequencies=m4_info.Frequency.values,\n                         horizons=m4_info.Horizon.values,\n                         values=np.load(\n                             train_cache_file if training else test_cache_file,\n                             allow_pickle=True))\n\n\n@dataclass()\nclass M4Meta:\n    seasonal_patterns = ['Yearly', 'Quarterly', 'Monthly', 'Weekly', 'Daily', 'Hourly']\n    horizons = [6, 8, 18, 13, 14, 48]\n    frequencies = [1, 4, 12, 1, 1, 24]\n    horizons_map = {\n        'Yearly': 6,\n        'Quarterly': 8,\n        
'Monthly': 18,\n        'Weekly': 13,\n        'Daily': 14,\n        'Hourly': 48\n    }  # different predict length\n    frequency_map = {\n        'Yearly': 1,\n        'Quarterly': 4,\n        'Monthly': 12,\n        'Weekly': 1,\n        'Daily': 1,\n        'Hourly': 24\n    }\n    history_size = {\n        'Yearly': 1.5,\n        'Quarterly': 1.5,\n        'Monthly': 1.5,\n        'Weekly': 10,\n        'Daily': 10,\n        'Hourly': 10\n    }  # from interpretable.gin\n\n\ndef load_m4_info() -> pd.DataFrame:\n    \"\"\"\n    Load M4Info file.\n\n    :return: Pandas DataFrame of M4Info.\n    \"\"\"\n    return pd.read_csv(INFO_FILE_PATH)\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/data_provider/uea.py",
    "content": "import os\nimport numpy as np\nimport pandas as pd\nimport torch\n\n\ndef collate_fn(data, max_len=None):\n    \"\"\"Build mini-batch tensors from a list of (X, mask) tuples. Mask input. Create\n    Args:\n        data: len(batch_size) list of tuples (X, y).\n            - X: torch tensor of shape (seq_length, feat_dim); variable seq_length.\n            - y: torch tensor of shape (num_labels,) : class indices or numerical targets\n                (for classification or regression, respectively). num_labels > 1 for multi-task models\n        max_len: global fixed sequence length. Used for architectures requiring fixed length input,\n            where the batch length cannot vary dynamically. Longer sequences are clipped, shorter are padded with 0s\n    Returns:\n        X: (batch_size, padded_length, feat_dim) torch tensor of masked features (input)\n        targets: (batch_size, padded_length, feat_dim) torch tensor of unmasked features (output)\n        target_masks: (batch_size, padded_length, feat_dim) boolean torch tensor\n            0 indicates masked values to be predicted, 1 indicates unaffected/\"active\" feature values\n        padding_masks: (batch_size, padded_length) boolean tensor, 1 means keep vector at this position, 0 means padding\n    \"\"\"\n\n    batch_size = len(data)\n    features, labels = zip(*data)\n\n    # Stack and pad features and masks (convert 2D to 3D tensors, i.e. 
add batch dimension)\n    lengths = [X.shape[0] for X in features]  # original sequence length for each time series\n    if max_len is None:\n        max_len = max(lengths)\n\n    X = torch.zeros(batch_size, max_len, features[0].shape[-1])  # (batch_size, padded_length, feat_dim)\n    for i in range(batch_size):\n        end = min(lengths[i], max_len)\n        X[i, :end, :] = features[i][:end, :]\n\n    targets = torch.stack(labels, dim=0)  # (batch_size, num_labels)\n\n    padding_masks = padding_mask(torch.tensor(lengths, dtype=torch.int16),\n                                 max_len=max_len)  # (batch_size, padded_length) boolean tensor, \"1\" means keep\n\n    return X, targets, padding_masks\n\n\ndef padding_mask(lengths, max_len=None):\n    \"\"\"\n    Used to mask padded positions: creates a (batch_size, max_len) boolean mask from a tensor of sequence lengths,\n    where 1 means keep element at this position (time step)\n    \"\"\"\n    batch_size = lengths.numel()\n    max_len = max_len or lengths.max_val()  # trick works because of overloading of 'or' operator for non-boolean types\n    return (torch.arange(0, max_len, device=lengths.device)\n            .type_as(lengths)\n            .repeat(batch_size, 1)\n            .lt(lengths.unsqueeze(1)))\n\n\nclass Normalizer(object):\n    \"\"\"\n    Normalizes dataframe across ALL contained rows (time steps). Different from per-sample normalization.\n    \"\"\"\n\n    def __init__(self, norm_type='standardization', mean=None, std=None, min_val=None, max_val=None):\n        \"\"\"\n        Args:\n            norm_type: choose from:\n                \"standardization\", \"minmax\": normalizes dataframe across ALL contained rows (time steps)\n                \"per_sample_std\", \"per_sample_minmax\": normalizes each sample separately (i.e. 
across only its own rows)\n            mean, std, min_val, max_val: optional (num_feat,) Series of pre-computed values\n        \"\"\"\n\n        self.norm_type = norm_type\n        self.mean = mean\n        self.std = std\n        self.min_val = min_val\n        self.max_val = max_val\n\n    def normalize(self, df):\n        \"\"\"\n        Args:\n            df: input dataframe\n        Returns:\n            df: normalized dataframe\n        \"\"\"\n        if self.norm_type == \"standardization\":\n            if self.mean is None:\n                self.mean = df.mean()\n                self.std = df.std()\n            return (df - self.mean) / (self.std + np.finfo(float).eps)\n\n        elif self.norm_type == \"minmax\":\n            if self.max_val is None:\n                self.max_val = df.max()\n                self.min_val = df.min()\n            return (df - self.min_val) / (self.max_val - self.min_val + np.finfo(float).eps)\n\n        elif self.norm_type == \"per_sample_std\":\n            grouped = df.groupby(by=df.index)\n            return (df - grouped.transform('mean')) / grouped.transform('std')\n\n        elif self.norm_type == \"per_sample_minmax\":\n            grouped = df.groupby(by=df.index)\n            min_vals = grouped.transform('min')\n            return (df - min_vals) / (grouped.transform('max') - min_vals + np.finfo(float).eps)\n\n        else:\n            raise (NameError(f'Normalize method \"{self.norm_type}\" not implemented'))\n\n\ndef interpolate_missing(y):\n    \"\"\"\n    Replaces NaN values in pd.Series `y` using linear interpolation\n    \"\"\"\n    if y.isna().any():\n        y = y.interpolate(method='linear', limit_direction='both')\n    return y\n\n\ndef subsample(y, limit=256, factor=2):\n    \"\"\"\n    If a given Series is longer than `limit`, returns subsampled sequence by the specified integer factor\n    \"\"\"\n    if len(y) > limit:\n        return y[::factor].reset_index(drop=True)\n    return y\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/exp/__init__.py",
    "content": ""
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/exp/exp_basic.py",
    "content": "import os\nimport torch\nfrom Other_baselines.models import Autoformer, TimesNet,DLinear, Informer, iTransformer, GPT4TS, TCN, LogTrans\n\n\nclass Exp_Basic(object):\n    def __init__(self, args):\n        self.args = args\n        self.model_dict = {\n            'TimesNet': TimesNet,\n            'Autoformer': Autoformer,\n            'DLinear': DLinear,\n            'Informer': Informer,\n            'iTransformer': iTransformer,\n            'GPT4TS': GPT4TS,\n            'TCN': TCN,\n            'LogTrans': LogTrans\n        }\n\n        self.device = self._acquire_device()\n        self.model = self._build_model().to(self.device)\n\n    def _build_model(self):\n        raise NotImplementedError\n        return None\n\n    def _acquire_device(self):\n        if self.args.use_gpu:\n            os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(\n                self.args.gpu) if not self.args.use_multi_gpu else self.args.devices\n            device = torch.device('cuda:{}'.format(self.args.gpu))\n            print('Use GPU: cuda:{}'.format(self.args.gpu))\n        else:\n            device = torch.device('cpu')\n            print('Use CPU')\n        return device\n\n    def _get_data(self):\n        pass\n\n    def vali(self):\n        pass\n\n    def train(self):\n        pass\n\n    def test(self):\n        pass\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/exp/exp_basic_patch.py",
    "content": "import os\nimport torch\nimport numpy as np\n\n\nclass Exp_Basic(object):\n    def __init__(self, args):\n        self.args = args\n        self.device = self._acquire_device()\n        self.model = self._build_model().to(self.device)\n\n    def _build_model(self):\n        raise NotImplementedError\n        return None\n\n    def _acquire_device(self):\n        if self.args.use_gpu:\n            os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(\n                self.args.gpu) if not self.args.use_multi_gpu else self.args.devices\n            device = torch.device('cuda:{}'.format(self.args.gpu))\n            print('Use GPU: cuda:{}'.format(self.args.gpu))\n        else:\n            device = torch.device('cpu')\n            print('Use CPU')\n        return device\n\n    def _get_data(self):\n        pass\n\n    def vali(self):\n        pass\n\n    def train(self):\n        pass\n\n    def test(self):\n        pass\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/exp/exp_long_term_forecasting.py",
    "content": "from Other_baselines.data_provider.data_factory import data_provider\nfrom Other_baselines.exp.exp_basic import Exp_Basic\nfrom Other_baselines.utils.tools import EarlyStopping, adjust_learning_rate, visual\nfrom Other_baselines.utils.metrics import metric\nimport torch\nimport torch.nn as nn\nfrom torch import optim\nimport os\nimport time\nimport warnings\nimport numpy as np\nfrom Other_baselines.utils.dtw_metric import accelerated_dtw\n\nwarnings.filterwarnings('ignore')\n\n\nclass Exp_Long_Term_Forecast(Exp_Basic):\n    def __init__(self, args):\n        super(Exp_Long_Term_Forecast, self).__init__(args)\n\n    def _build_model(self):\n        model = self.model_dict[self.args.model].Model(self.args).float()\n\n        if self.args.use_multi_gpu and self.args.use_gpu:\n            model = nn.DataParallel(model, device_ids=self.args.device_ids)\n        return model\n\n    def _get_data(self, flag):\n        data_set, data_loader = data_provider(self.args, flag)\n        return data_set, data_loader\n\n    def _select_optimizer(self):\n        model_optim = optim.Adam(self.model.parameters(), lr=self.args.learning_rate)\n        return model_optim\n\n    def _select_criterion(self):\n        criterion = nn.MSELoss()\n        return criterion\n\n    def vali(self, vali_data, vali_loader, criterion):\n        total_loss = []\n        self.model.eval()\n        with torch.no_grad():\n            for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(vali_loader):\n                batch_x = batch_x.float().to(self.device)\n                batch_y = batch_y.float()\n\n                batch_x_mark = batch_x_mark.float().to(self.device)\n                batch_y_mark = batch_y_mark.float().to(self.device)\n\n                # decoder input\n                dec_inp = torch.zeros_like(batch_y[:, -self.args.pred_len:, :]).float()\n                dec_inp = torch.cat([batch_y[:, :self.args.label_len, :], dec_inp], dim=1).float().to(self.device)\n 
               # encoder - decoder\n                if self.args.use_amp:\n                    with torch.cuda.amp.autocast():\n                        if self.args.output_attention:\n                            outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n                        else:\n                            outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n                else:\n                    if self.args.output_attention:\n                        outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n                    else:\n                        outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n                f_dim = -1 if self.args.features == 'MS' else 0\n                outputs = outputs[:, -self.args.pred_len:, f_dim:]\n                batch_y = batch_y[:, -self.args.pred_len:, f_dim:].to(self.device)\n\n                pred = outputs.detach().cpu()\n                true = batch_y.detach().cpu()\n\n                loss = criterion(pred, true)\n\n                total_loss.append(loss)\n        total_loss = np.average(total_loss)\n        self.model.train()\n        return total_loss\n\n    def train(self, setting):\n        train_data, train_loader = self._get_data(flag='train')\n        vali_data, vali_loader = self._get_data(flag='val')\n        test_data, test_loader = self._get_data(flag='test')\n\n        path = os.path.join(self.args.checkpoints, setting)\n        if not os.path.exists(path):\n            os.makedirs(path)\n\n        time_now = time.time()\n\n        train_steps = len(train_loader)\n        early_stopping = EarlyStopping(patience=self.args.patience, verbose=True)\n\n        model_optim = self._select_optimizer()\n        criterion = self._select_criterion()\n\n        if self.args.use_amp:\n            scaler = torch.cuda.amp.GradScaler()\n\n        for epoch in range(self.args.train_epochs):\n            iter_count = 0\n            
train_loss = []\n\n            self.model.train()\n            epoch_time = time.time()\n            for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(train_loader):\n\n                # print(\"batch_x.shape = \", batch_x.shape, batch_y.shape, batch_x_mark.shape, batch_y_mark.shape)\n\n                iter_count += 1\n                model_optim.zero_grad()\n                batch_x = batch_x.float().to(self.device)\n                batch_y = batch_y.float().to(self.device)\n                batch_x_mark = batch_x_mark.float().to(self.device)\n                batch_y_mark = batch_y_mark.float().to(self.device)\n\n                # decoder input\n                dec_inp = torch.zeros_like(batch_y[:, -self.args.pred_len:, :]).float()\n                dec_inp = torch.cat([batch_y[:, :self.args.label_len, :], dec_inp], dim=1).float().to(self.device)\n\n                # encoder - decoder\n                if self.args.use_amp:\n                    with torch.cuda.amp.autocast():\n                        if self.args.output_attention:\n                            outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n                        else:\n                            outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n\n                        f_dim = -1 if self.args.features == 'MS' else 0\n                        outputs = outputs[:, -self.args.pred_len:, f_dim:]\n                        batch_y = batch_y[:, -self.args.pred_len:, f_dim:].to(self.device)\n                        loss = criterion(outputs, batch_y)\n                        train_loss.append(loss.item())\n                else:\n                    if self.args.output_attention:\n                        outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n                    else:\n                        outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n\n                    f_dim = -1 if self.args.features == 'MS' 
else 0\n                    outputs = outputs[:, -self.args.pred_len:, f_dim:]\n                    batch_y = batch_y[:, -self.args.pred_len:, f_dim:].to(self.device)\n                    loss = criterion(outputs, batch_y)\n                    train_loss.append(loss.item())\n\n                if (i + 1) % 100 == 0:\n                    print(\"\\titers: {0}, epoch: {1} | loss: {2:.7f}\".format(i + 1, epoch + 1, loss.item()))\n                    speed = (time.time() - time_now) / iter_count\n                    left_time = speed * ((self.args.train_epochs - epoch) * train_steps - i)\n                    print('\\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(speed, left_time))\n                    iter_count = 0\n                    time_now = time.time()\n\n                if self.args.use_amp:\n                    scaler.scale(loss).backward()\n                    scaler.step(model_optim)\n                    scaler.update()\n                else:\n                    loss.backward()\n                    model_optim.step()\n\n            print(\"Epoch: {} cost time: {}\".format(epoch + 1, time.time() - epoch_time))\n            train_loss = np.average(train_loss)\n            vali_loss = self.vali(vali_data, vali_loader, criterion)\n            test_loss = self.vali(test_data, test_loader, criterion)\n\n            print(\"Epoch: {0}, Steps: {1} | Train Loss: {2:.7f} Vali Loss: {3:.7f} Test Loss: {4:.7f}\".format(\n                epoch + 1, train_steps, train_loss, vali_loss, test_loss))\n            early_stopping(vali_loss, self.model, path)\n            if early_stopping.early_stop:\n                print(\"Early stopping\")\n                break\n\n            adjust_learning_rate(model_optim, epoch + 1, self.args)\n\n        best_model_path = path + '/' + 'checkpoint.pth'\n        self.model.load_state_dict(torch.load(best_model_path))\n\n        return self.model\n\n    def test(self, setting, test=0):\n        test_data, test_loader = 
self._get_data(flag='test')\n        if test:\n            print('loading model')\n            self.model.load_state_dict(torch.load(os.path.join('./checkpoints/' + setting, 'checkpoint.pth')))\n\n        preds = []\n        trues = []\n        folder_path = './test_results/' + setting + '/'\n        if not os.path.exists(folder_path):\n            os.makedirs(folder_path)\n\n        self.model.eval()\n        with torch.no_grad():\n            for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(test_loader):\n                batch_x = batch_x.float().to(self.device)\n                batch_y = batch_y.float().to(self.device)\n\n                batch_x_mark = batch_x_mark.float().to(self.device)\n                batch_y_mark = batch_y_mark.float().to(self.device)\n\n                # decoder input\n                dec_inp = torch.zeros_like(batch_y[:, -self.args.pred_len:, :]).float()\n                dec_inp = torch.cat([batch_y[:, :self.args.label_len, :], dec_inp], dim=1).float().to(self.device)\n                # encoder - decoder\n                if self.args.use_amp:\n                    with torch.cuda.amp.autocast():\n                        if self.args.output_attention:\n                            outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n                        else:\n                            outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n                else:\n                    if self.args.output_attention:\n                        outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n\n                    else:\n                        outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n\n                f_dim = -1 if self.args.features == 'MS' else 0\n                outputs = outputs[:, -self.args.pred_len:, :]\n                batch_y = batch_y[:, -self.args.pred_len:, :].to(self.device)\n                outputs = outputs.detach().cpu().numpy()\n  
              batch_y = batch_y.detach().cpu().numpy()\n                if test_data.scale and self.args.inverse:\n                    shape = outputs.shape\n                    outputs = test_data.inverse_transform(outputs.reshape(shape[0] * shape[1], -1)).reshape(shape)\n                    batch_y = test_data.inverse_transform(batch_y.reshape(shape[0] * shape[1], -1)).reshape(shape)\n        \n                outputs = outputs[:, :, f_dim:]\n                batch_y = batch_y[:, :, f_dim:]\n\n                pred = outputs\n                true = batch_y\n\n                preds.append(pred)\n                trues.append(true)\n                if i % 20 == 0:\n                    input = batch_x.detach().cpu().numpy()\n                    if test_data.scale and self.args.inverse:\n                        shape = input.shape\n                        input = test_data.inverse_transform(input.reshape(shape[0] * shape[1], -1)).reshape(shape)\n                    gt = np.concatenate((input[0, :, -1], true[0, :, -1]), axis=0)\n                    pd = np.concatenate((input[0, :, -1], pred[0, :, -1]), axis=0)\n                    visual(gt, pd, os.path.join(folder_path, str(i) + '.pdf'))\n\n        preds = np.concatenate(preds, axis=0)\n        trues = np.concatenate(trues, axis=0)\n        print('test shape:', preds.shape, trues.shape)\n        preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1])\n        trues = trues.reshape(-1, trues.shape[-2], trues.shape[-1])\n        print('test shape:', preds.shape, trues.shape)\n\n        # result save\n        folder_path = './results/' + setting + '/'\n        if not os.path.exists(folder_path):\n            os.makedirs(folder_path)\n        \n        # dtw calculation\n        if self.args.use_dtw:\n            dtw_list = []\n            manhattan_distance = lambda x, y: np.abs(x - y)\n            for i in range(preds.shape[0]):\n                x = preds[i].reshape(-1,1)\n                y = trues[i].reshape(-1,1)\n  
              if i % 100 == 0:\n                    print(\"calculating dtw iter:\", i)\n                d, _, _, _ = accelerated_dtw(x, y, dist=manhattan_distance)\n                dtw_list.append(d)\n            dtw = np.array(dtw_list).mean()\n        else:\n            dtw = -999\n            \n\n        mae, mse, rmse, mape, mspe = metric(preds, trues)\n        print('mse:{}, mae:{}, dtw:{}'.format(mse, mae, dtw))\n        f = open(\"result_long_term_forecast.txt\", 'a')\n        f.write(setting + \"  \\n\")\n        f.write('mse:{}, mae:{}, dtw:{}'.format(mse, mae, dtw))\n        f.write('\\n')\n        f.write('\\n')\n        f.close()\n\n        np.save(folder_path + 'metrics.npy', np.array([mae, mse, rmse, mape, mspe]))\n        np.save(folder_path + 'pred.npy', preds)\n        np.save(folder_path + 'true.npy', trues)\n\n        return mae, mse, rmse, mape, mspe\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/exp/exp_main.py",
    "content": "from Other_baselines.data_provider.data_factory import data_provider\nfrom Other_baselines.exp.exp_basic_patch import Exp_Basic\nfrom Other_baselines.models import PatchTST_raw\nfrom Other_baselines.utils.tools import EarlyStopping, visual, test_params_flop\nfrom Other_baselines.utils.metrics import metric\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch import optim\nfrom torch.optim import lr_scheduler\n\nimport os\nimport time\n\nimport warnings\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nwarnings.filterwarnings('ignore')\n\n\ndef adjust_learning_rate(optimizer, scheduler, epoch, args, printout=True):\n    # lr = args.learning_rate * (0.2 ** (epoch // 2))\n    if args.lradj == 'type1':\n        lr_adjust = {epoch: args.learning_rate * (0.5 ** ((epoch - 1) // 1))}\n    elif args.lradj == 'type2':\n        lr_adjust = {\n            2: 5e-5, 4: 1e-5, 6: 5e-6, 8: 1e-6,\n            10: 5e-7, 15: 1e-7, 20: 5e-8\n        }\n    elif args.lradj == 'type3':\n        lr_adjust = {epoch: args.learning_rate if epoch < 3 else args.learning_rate * (0.9 ** ((epoch - 3) // 1))}\n    elif args.lradj == 'constant':\n        lr_adjust = {epoch: args.learning_rate}\n    elif args.lradj == '3':\n        lr_adjust = {epoch: args.learning_rate if epoch < 10 else args.learning_rate * 0.1}\n    elif args.lradj == '4':\n        lr_adjust = {epoch: args.learning_rate if epoch < 15 else args.learning_rate * 0.1}\n    elif args.lradj == '5':\n        lr_adjust = {epoch: args.learning_rate if epoch < 25 else args.learning_rate * 0.1}\n    elif args.lradj == '6':\n        lr_adjust = {epoch: args.learning_rate if epoch < 5 else args.learning_rate * 0.1}\n    elif args.lradj == 'TST':\n        lr_adjust = {epoch: scheduler.get_last_lr()[0]}\n\n    if epoch in lr_adjust.keys():\n        lr = lr_adjust[epoch]\n        for param_group in optimizer.param_groups:\n            param_group['lr'] = lr\n        if printout: print('Updating learning 
rate to {}'.format(lr))\n\n\nclass Exp_Main(Exp_Basic):\n    def __init__(self, args):\n        super(Exp_Main, self).__init__(args)\n\n    def _build_model(self):\n        model_dict = {\n            'PatchTST': PatchTST_raw,\n        }\n        model = model_dict[self.args.model].Model(self.args).float()\n\n        if self.args.use_multi_gpu and self.args.use_gpu:\n            model = nn.DataParallel(model, device_ids=self.args.device_ids)\n        return model\n\n    def _get_data(self, flag):\n        data_set, data_loader = data_provider(self.args, flag)\n        return data_set, data_loader\n\n    def _select_optimizer(self):\n        model_optim = optim.Adam(self.model.parameters(), lr=self.args.learning_rate)\n        return model_optim\n\n    def _select_criterion(self):\n        criterion = nn.MSELoss()\n        return criterion\n\n    def vali(self, vali_data, vali_loader, criterion):\n        total_loss = []\n        self.model.eval()\n        with torch.no_grad():\n            for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(vali_loader):\n                batch_x = batch_x.float().to(self.device)\n                batch_y = batch_y.float()\n\n                batch_x_mark = batch_x_mark.float().to(self.device)\n                batch_y_mark = batch_y_mark.float().to(self.device)\n\n                # decoder input\n                dec_inp = torch.zeros_like(batch_y[:, -self.args.pred_len:, :]).float()\n                dec_inp = torch.cat([batch_y[:, :self.args.label_len, :], dec_inp], dim=1).float().to(self.device)\n                # encoder - decoder\n                if self.args.use_amp:\n                    with torch.cuda.amp.autocast():\n                        if 'Linear' in self.args.model or 'TST' in self.args.model:\n                            outputs = self.model(batch_x)\n                        else:\n                            if self.args.output_attention:\n                                outputs = self.model(batch_x, 
batch_x_mark, dec_inp, batch_y_mark)[0]\n                            else:\n                                outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n                else:\n                    if 'Linear' in self.args.model or 'TST' in self.args.model:\n                        outputs = self.model(batch_x)\n                    else:\n                        if self.args.output_attention:\n                            outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n                        else:\n                            outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n                f_dim = -1 if self.args.features == 'MS' else 0\n                outputs = outputs[:, -self.args.pred_len:, f_dim:]\n                batch_y = batch_y[:, -self.args.pred_len:, f_dim:].to(self.device)\n\n                pred = outputs.detach().cpu()\n                true = batch_y.detach().cpu()\n\n                loss = criterion(pred, true)\n\n                total_loss.append(loss)\n        total_loss = np.average(total_loss)\n        self.model.train()\n        return total_loss\n\n    def train(self, setting):\n        train_data, train_loader = self._get_data(flag='train')\n        vali_data, vali_loader = self._get_data(flag='val')\n        test_data, test_loader = self._get_data(flag='test')\n\n        path = os.path.join(self.args.checkpoints, setting)\n        if not os.path.exists(path):\n            os.makedirs(path)\n\n        time_now = time.time()\n\n        train_steps = len(train_loader)\n        early_stopping = EarlyStopping(patience=self.args.patience, verbose=True)\n\n        model_optim = self._select_optimizer()\n        criterion = self._select_criterion()\n\n        if self.args.use_amp:\n            scaler = torch.cuda.amp.GradScaler()\n\n        scheduler = lr_scheduler.OneCycleLR(optimizer=model_optim,\n                                            steps_per_epoch=train_steps,\n                
                            pct_start=self.args.pct_start,\n                                            epochs=self.args.train_epochs,\n                                            max_lr=self.args.learning_rate)\n\n        for epoch in range(self.args.train_epochs):\n            iter_count = 0\n            train_loss = []\n\n            self.model.train()\n            epoch_time = time.time()\n            for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(train_loader):\n                iter_count += 1\n                model_optim.zero_grad()\n                batch_x = batch_x.float().to(self.device)\n\n                batch_y = batch_y.float().to(self.device)\n                batch_x_mark = batch_x_mark.float().to(self.device)\n                batch_y_mark = batch_y_mark.float().to(self.device)\n\n                # decoder input\n                dec_inp = torch.zeros_like(batch_y[:, -self.args.pred_len:, :]).float()\n                dec_inp = torch.cat([batch_y[:, :self.args.label_len, :], dec_inp], dim=1).float().to(self.device)\n\n                # encoder - decoder\n                if self.args.use_amp:\n                    with torch.cuda.amp.autocast():\n                        if 'Linear' in self.args.model or 'TST' in self.args.model:\n                            outputs = self.model(batch_x)\n                        else:\n                            if self.args.output_attention:\n                                outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n                            else:\n                                outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n\n                        f_dim = -1 if self.args.features == 'MS' else 0\n                        outputs = outputs[:, -self.args.pred_len:, f_dim:]\n                        batch_y = batch_y[:, -self.args.pred_len:, f_dim:].to(self.device)\n                        loss = criterion(outputs, batch_y)\n                        
train_loss.append(loss.item())\n                else:\n                    if 'Linear' in self.args.model or 'TST' in self.args.model:\n                        outputs = self.model(batch_x)\n                    else:\n                        if self.args.output_attention:\n                            outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n\n                        else:\n                            outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark, batch_y)\n                    # print(outputs.shape,batch_y.shape)\n                    f_dim = -1 if self.args.features == 'MS' else 0\n                    outputs = outputs[:, -self.args.pred_len:, f_dim:]\n                    batch_y = batch_y[:, -self.args.pred_len:, f_dim:].to(self.device)\n                    loss = criterion(outputs, batch_y)\n                    train_loss.append(loss.item())\n\n                if (i + 1) % 100 == 0:\n                    print(\"\\titers: {0}, epoch: {1} | loss: {2:.7f}\".format(i + 1, epoch + 1, loss.item()))\n                    speed = (time.time() - time_now) / iter_count\n                    left_time = speed * ((self.args.train_epochs - epoch) * train_steps - i)\n                    print('\\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(speed, left_time))\n                    iter_count = 0\n                    time_now = time.time()\n\n                if self.args.use_amp:\n                    scaler.scale(loss).backward()\n                    scaler.step(model_optim)\n                    scaler.update()\n                else:\n                    loss.backward()\n                    model_optim.step()\n\n                if self.args.lradj == 'TST':\n                    adjust_learning_rate(model_optim, scheduler, epoch + 1, self.args, printout=False)\n                    scheduler.step()\n\n            print(\"Epoch: {} cost time: {}\".format(epoch + 1, time.time() - epoch_time))\n            train_loss = 
np.average(train_loss)\n            vali_loss = self.vali(vali_data, vali_loader, criterion)\n            test_loss = self.vali(test_data, test_loader, criterion)\n\n            print(\"Epoch: {0}, Steps: {1} | Train Loss: {2:.7f} Vali Loss: {3:.7f} Test Loss: {4:.7f}\".format(\n                epoch + 1, train_steps, train_loss, vali_loss, test_loss))\n            early_stopping(vali_loss, self.model, path)\n            if early_stopping.early_stop:\n                print(\"Early stopping\")\n                break\n\n            if self.args.lradj != 'TST':\n                adjust_learning_rate(model_optim, scheduler, epoch + 1, self.args)\n            else:\n                print('Updating learning rate to {}'.format(scheduler.get_last_lr()[0]))\n\n        best_model_path = path + '/' + 'checkpoint.pth'\n        self.model.load_state_dict(torch.load(best_model_path))\n\n        return self.model\n\n    def test(self, setting, test=0):\n        test_data, test_loader = self._get_data(flag='test')\n\n        if test:\n            print('loading model')\n            self.model.load_state_dict(torch.load(os.path.join('./checkpoints/' + setting, 'checkpoint.pth')))\n\n        preds = []\n        trues = []\n        inputx = []\n        folder_path = './test_results/' + setting + '/'\n        if not os.path.exists(folder_path):\n            os.makedirs(folder_path)\n\n        self.model.eval()\n        with torch.no_grad():\n            for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(test_loader):\n                batch_x = batch_x.float().to(self.device)\n                batch_y = batch_y.float().to(self.device)\n\n                batch_x_mark = batch_x_mark.float().to(self.device)\n                batch_y_mark = batch_y_mark.float().to(self.device)\n\n                # decoder input\n                dec_inp = torch.zeros_like(batch_y[:, -self.args.pred_len:, :]).float()\n                dec_inp = torch.cat([batch_y[:, :self.args.label_len, :], 
dec_inp], dim=1).float().to(self.device)\n                # encoder - decoder\n                if self.args.use_amp:\n                    with torch.cuda.amp.autocast():\n                        if 'Linear' in self.args.model or 'TST' in self.args.model:\n                            outputs = self.model(batch_x)\n                        else:\n                            if self.args.output_attention:\n                                outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n                            else:\n                                outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n                else:\n                    if 'Linear' in self.args.model or 'TST' in self.args.model:\n                        outputs = self.model(batch_x)\n                    else:\n                        if self.args.output_attention:\n                            outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n\n                        else:\n                            outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n\n                f_dim = -1 if self.args.features == 'MS' else 0\n                # print(\"outputs.shape = \", outputs.shape,batch_y.shape)\n                outputs = outputs[:, -self.args.pred_len:, f_dim:]\n                batch_y = batch_y[:, -self.args.pred_len:, f_dim:].to(self.device)\n                outputs = outputs.detach().cpu().numpy()\n                batch_y = batch_y.detach().cpu().numpy()\n\n                pred = outputs  # outputs.detach().cpu().numpy()  # .squeeze()\n                true = batch_y  # batch_y.detach().cpu().numpy()  # .squeeze()\n\n                preds.append(pred)\n\n                # print(\"preds.shape = \", np.array(preds).shape)\n                trues.append(true)\n                inputx.append(batch_x.detach().cpu().numpy())\n                if i % 20 == 0:\n                    input = batch_x.detach().cpu().numpy()\n             
       gt = np.concatenate((input[0, :, -1], true[0, :, -1]), axis=0)\n                    pd = np.concatenate((input[0, :, -1], pred[0, :, -1]), axis=0)\n                    visual(gt, pd, os.path.join(folder_path, str(i) + '.pdf'))\n\n        if self.args.test_flop:\n            test_params_flop((batch_x.shape[1], batch_x.shape[2]))\n            exit()\n        # preds = np.array(preds)\n        # trues = np.array(trues)\n        # inputx = np.array(inputx)\n\n        preds = np.concatenate(preds, axis=0)\n        trues = np.concatenate(trues, axis=0)\n        inputx = np.concatenate(inputx)\n\n\n        # print('test shape:', preds.shape, trues.shape)\n        preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1])\n        trues = trues.reshape(-1, trues.shape[-2], trues.shape[-1])\n        inputx = inputx.reshape(-1, inputx.shape[-2], inputx.shape[-1])\n\n        # print(\"preds.shape = \", preds.shape)\n        # preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1])\n        # trues = trues.reshape(-1, trues.shape[-2], trues.shape[-1])\n        # inputx = inputx.reshape(-1, inputx.shape[-2], inputx.shape[-1])\n\n        # result save\n        folder_path = './results/' + setting + '/'\n        if not os.path.exists(folder_path):\n            os.makedirs(folder_path)\n\n        mae, mse, rmse, mape, mspe = metric(preds, trues)\n        print('mse:{}, mae:{}, mspe:{}'.format(mse, mae, mspe))\n        f = open(\"result.txt\", 'a')\n        f.write(setting + \"  \\n\")\n        f.write('mse:{}, mae:{}, mspe:{}'.format(mse, mae, mspe))\n        f.write('\\n')\n        f.write('\\n')\n        f.close()\n\n        # np.save(folder_path + 'metrics.npy', np.array([mae, mse, rmse, mape, mspe,rse, corr]))\n        np.save(folder_path + 'pred.npy', preds)\n        # np.save(folder_path + 'true.npy', trues)\n        # np.save(folder_path + 'x.npy', inputx)\n        return mae, mse, rmse, mape, mspe\n\n    def predict(self, setting, load=False):\n        
pred_data, pred_loader = self._get_data(flag='pred')\n\n        if load:\n            path = os.path.join(self.args.checkpoints, setting)\n            best_model_path = path + '/' + 'checkpoint.pth'\n            self.model.load_state_dict(torch.load(best_model_path))\n\n        preds = []\n\n        self.model.eval()\n        with torch.no_grad():\n            for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(pred_loader):\n                batch_x = batch_x.float().to(self.device)\n                batch_y = batch_y.float()\n                batch_x_mark = batch_x_mark.float().to(self.device)\n                batch_y_mark = batch_y_mark.float().to(self.device)\n\n                # decoder input\n                dec_inp = torch.zeros([batch_y.shape[0], self.args.pred_len, batch_y.shape[2]]).float().to(\n                    batch_y.device)\n                dec_inp = torch.cat([batch_y[:, :self.args.label_len, :], dec_inp], dim=1).float().to(self.device)\n                # encoder - decoder\n                if self.args.use_amp:\n                    with torch.cuda.amp.autocast():\n                        if 'Linear' in self.args.model or 'TST' in self.args.model:\n                            outputs = self.model(batch_x)\n                        else:\n                            if self.args.output_attention:\n                                outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n                            else:\n                                outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n                else:\n                    if 'Linear' in self.args.model or 'TST' in self.args.model:\n                        outputs = self.model(batch_x)\n                    else:\n                        if self.args.output_attention:\n                            outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n                        else:\n                            outputs = 
self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n                pred = outputs.detach().cpu().numpy()  # .squeeze()\n                preds.append(pred)\n\n        preds = np.array(preds)\n        preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1])\n\n        # result save\n        folder_path = './results/' + setting + '/'\n        if not os.path.exists(folder_path):\n            os.makedirs(folder_path)\n\n        np.save(folder_path + 'real_prediction.npy', preds)\n\n        return\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/exp/exp_short_term_forecasting.py",
    "content": "from Other_baselines.data_provider.data_factory import data_provider\nfrom Other_baselines.data_provider.m4 import M4Meta\nfrom Other_baselines.exp.exp_basic import Exp_Basic\nfrom Other_baselines.utils.tools import EarlyStopping, adjust_learning_rate, visual\nfrom Other_baselines.utils.losses import mape_loss, mase_loss, smape_loss\nfrom Other_baselines.utils.m4_summary import M4Summary\nimport torch\nimport torch.nn as nn\nfrom torch import optim\nimport os\nimport time\nimport warnings\nimport numpy as np\nimport pandas\n\nwarnings.filterwarnings('ignore')\n\n\nclass Exp_Short_Term_Forecast(Exp_Basic):\n    def __init__(self, args):\n        super(Exp_Short_Term_Forecast, self).__init__(args)\n\n    def _build_model(self):\n        if self.args.data == 'm4':\n            self.args.pred_len = M4Meta.horizons_map[self.args.seasonal_patterns]  # Up to M4 config\n            self.args.seq_len = 2 * self.args.pred_len  # input_len = 2*pred_len\n            self.args.label_len = self.args.pred_len\n            self.args.frequency_map = M4Meta.frequency_map[self.args.seasonal_patterns]\n        model = self.model_dict[self.args.model].Model(self.args).float()\n\n        if self.args.use_multi_gpu and self.args.use_gpu:\n            model = nn.DataParallel(model, device_ids=self.args.device_ids)\n        return model\n\n    def _get_data(self, flag):\n        data_set, data_loader = data_provider(self.args, flag)\n        return data_set, data_loader\n\n    def _select_optimizer(self):\n        model_optim = optim.Adam(self.model.parameters(), lr=self.args.learning_rate)\n        return model_optim\n\n    def _select_criterion(self, loss_name='MSE'):\n        if loss_name == 'MSE':\n            return nn.MSELoss()\n        elif loss_name == 'MAPE':\n            return mape_loss()\n        elif loss_name == 'MASE':\n            return mase_loss()\n        elif loss_name == 'SMAPE':\n            return smape_loss()\n\n    def train(self, setting):\n        
train_data, train_loader = self._get_data(flag='train')\n        vali_data, vali_loader = self._get_data(flag='val')\n\n        path = os.path.join(self.args.checkpoints, setting)\n        if not os.path.exists(path):\n            os.makedirs(path)\n\n        time_now = time.time()\n\n        train_steps = len(train_loader)\n        early_stopping = EarlyStopping(patience=self.args.patience, verbose=True)\n\n        model_optim = self._select_optimizer()\n        criterion = self._select_criterion(self.args.loss)\n        mse = nn.MSELoss()\n\n        for epoch in range(self.args.train_epochs):\n            iter_count = 0\n            train_loss = []\n\n            self.model.train()\n            epoch_time = time.time()\n            for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(train_loader):\n                iter_count += 1\n                model_optim.zero_grad()\n                batch_x = batch_x.float().to(self.device)\n\n                batch_y = batch_y.float().to(self.device)\n                batch_y_mark = batch_y_mark.float().to(self.device)\n\n                # decoder input\n                dec_inp = torch.zeros_like(batch_y[:, -self.args.pred_len:, :]).float()\n                dec_inp = torch.cat([batch_y[:, :self.args.label_len, :], dec_inp], dim=1).float().to(self.device)\n\n                outputs = self.model(batch_x, None, dec_inp, None)\n\n                f_dim = -1 if self.args.features == 'MS' else 0\n                outputs = outputs[:, -self.args.pred_len:, f_dim:]\n                batch_y = batch_y[:, -self.args.pred_len:, f_dim:].to(self.device)\n\n                batch_y_mark = batch_y_mark[:, -self.args.pred_len:, f_dim:].to(self.device)\n                loss_value = criterion(batch_x, self.args.frequency_map, outputs, batch_y, batch_y_mark)\n                loss_sharpness = mse((outputs[:, 1:, :] - outputs[:, :-1, :]), (batch_y[:, 1:, :] - batch_y[:, :-1, :]))\n                loss = loss_value  # + loss_sharpness * 
1e-5\n                train_loss.append(loss.item())\n\n                if (i + 1) % 100 == 0:\n                    print(\"\\titers: {0}, epoch: {1} | loss: {2:.7f}\".format(i + 1, epoch + 1, loss.item()))\n                    speed = (time.time() - time_now) / iter_count\n                    left_time = speed * ((self.args.train_epochs - epoch) * train_steps - i)\n                    print('\\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(speed, left_time))\n                    iter_count = 0\n                    time_now = time.time()\n\n                loss.backward()\n                model_optim.step()\n\n            print(\"Epoch: {} cost time: {}\".format(epoch + 1, time.time() - epoch_time))\n            train_loss = np.average(train_loss)\n            vali_loss = self.vali(train_loader, vali_loader, criterion)\n            test_loss = vali_loss\n            print(\"Epoch: {0}, Steps: {1} | Train Loss: {2:.7f} Vali Loss: {3:.7f} Test Loss: {4:.7f}\".format(\n                epoch + 1, train_steps, train_loss, vali_loss, test_loss))\n            early_stopping(vali_loss, self.model, path)\n            if early_stopping.early_stop:\n                print(\"Early stopping\")\n                break\n\n            adjust_learning_rate(model_optim, epoch + 1, self.args)\n\n        best_model_path = path + '/' + 'checkpoint.pth'\n        self.model.load_state_dict(torch.load(best_model_path))\n\n        return self.model\n\n    def vali(self, train_loader, vali_loader, criterion):\n        x, _ = train_loader.dataset.last_insample_window()\n        y = vali_loader.dataset.timeseries\n        x = torch.tensor(x, dtype=torch.float32).to(self.device)\n        x = x.unsqueeze(-1)\n\n        self.model.eval()\n        with torch.no_grad():\n            # decoder input\n            B, _, C = x.shape\n            dec_inp = torch.zeros((B, self.args.pred_len, C)).float().to(self.device)\n            dec_inp = torch.cat([x[:, -self.args.label_len:, :], dec_inp], 
dim=1).float()\n            # encoder - decoder\n            outputs = torch.zeros((B, self.args.pred_len, C)).float()  # .to(self.device)\n            id_list = np.arange(0, B, 500)  # validation set size\n            id_list = np.append(id_list, B)\n            for i in range(len(id_list) - 1):\n                outputs[id_list[i]:id_list[i + 1], :, :] = self.model(x[id_list[i]:id_list[i + 1]], None,\n                                                                      dec_inp[id_list[i]:id_list[i + 1]],\n                                                                      None).detach().cpu()\n            f_dim = -1 if self.args.features == 'MS' else 0\n            outputs = outputs[:, -self.args.pred_len:, f_dim:]\n            pred = outputs\n            true = torch.from_numpy(np.array(y))\n            batch_y_mark = torch.ones(true.shape)\n\n            loss = criterion(x.detach().cpu()[:, :, 0], self.args.frequency_map, pred[:, :, 0], true, batch_y_mark)\n\n        self.model.train()\n        return loss\n\n    def test(self, setting, test=0):\n        _, train_loader = self._get_data(flag='train')\n        _, test_loader = self._get_data(flag='test')\n        x, _ = train_loader.dataset.last_insample_window()\n        y = test_loader.dataset.timeseries\n        x = torch.tensor(x, dtype=torch.float32).to(self.device)\n        x = x.unsqueeze(-1)\n\n        if test:\n            print('loading model')\n            self.model.load_state_dict(torch.load(os.path.join('./checkpoints/' + setting, 'checkpoint.pth')))\n\n        folder_path = './test_results/' + setting + '/'\n        if not os.path.exists(folder_path):\n            os.makedirs(folder_path)\n\n        self.model.eval()\n        with torch.no_grad():\n            B, _, C = x.shape\n            dec_inp = torch.zeros((B, self.args.pred_len, C)).float().to(self.device)\n            dec_inp = torch.cat([x[:, -self.args.label_len:, :], dec_inp], dim=1).float()\n            # encoder - decoder\n          
  outputs = torch.zeros((B, self.args.pred_len, C)).float().to(self.device)\n            id_list = np.arange(0, B, 1)\n            id_list = np.append(id_list, B)\n            for i in range(len(id_list) - 1):\n                outputs[id_list[i]:id_list[i + 1], :, :] = self.model(x[id_list[i]:id_list[i + 1]], None,\n                                                                      dec_inp[id_list[i]:id_list[i + 1]], None)\n\n                if id_list[i] % 1000 == 0:\n                    print(id_list[i])\n\n            f_dim = -1 if self.args.features == 'MS' else 0\n            outputs = outputs[:, -self.args.pred_len:, f_dim:]\n            outputs = outputs.detach().cpu().numpy()\n\n            preds = outputs\n            trues = y\n            x = x.detach().cpu().numpy()\n\n            for i in range(0, preds.shape[0], preds.shape[0] // 10):\n                gt = np.concatenate((x[i, :, 0], trues[i]), axis=0)\n                pd = np.concatenate((x[i, :, 0], preds[i, :, 0]), axis=0)\n                visual(gt, pd, os.path.join(folder_path, str(i) + '.pdf'))\n\n        print('test shape:', preds.shape)\n\n        # result save\n        folder_path = './m4_results/' + self.args.model + '/'\n        if not os.path.exists(folder_path):\n            os.makedirs(folder_path)\n\n        forecasts_df = pandas.DataFrame(preds[:, :, 0], columns=[f'V{i + 1}' for i in range(self.args.pred_len)])\n        forecasts_df.index = test_loader.dataset.ids[:preds.shape[0]]\n        forecasts_df.index.name = 'id'\n        forecasts_df.set_index(forecasts_df.columns[0], inplace=True)\n        forecasts_df.to_csv(folder_path + self.args.seasonal_patterns + '_forecast.csv')\n\n        print(self.args.model)\n        file_path = './m4_results/' + self.args.model + '/'\n        if 'Weekly_forecast.csv' in os.listdir(file_path) \\\n                and 'Monthly_forecast.csv' in os.listdir(file_path) \\\n                and 'Yearly_forecast.csv' in os.listdir(file_path) \\\n          
      and 'Daily_forecast.csv' in os.listdir(file_path) \\\n                and 'Hourly_forecast.csv' in os.listdir(file_path) \\\n                and 'Quarterly_forecast.csv' in os.listdir(file_path):\n            m4_summary = M4Summary(file_path, self.args.root_path)\n            # m4_forecast.set_index(m4_winner_forecast.columns[0], inplace=True)\n            smape_results, owa_results, mape, mase = m4_summary.evaluate()\n            print('smape:', smape_results)\n            print('mape:', mape)\n            print('mase:', mase)\n            print('owa:', owa_results)\n        else:\n            print('After all 6 tasks are finished, you can calculate the averaged index')\n\n\n        return\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/layers/AutoCorrelation.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\nfrom math import sqrt\nimport os\n\n\nclass AutoCorrelation(nn.Module):\n    \"\"\"\n    AutoCorrelation Mechanism with the following two phases:\n    (1) period-based dependencies discovery\n    (2) time delay aggregation\n    This block can replace the self-attention family mechanism seamlessly.\n    \"\"\"\n\n    def __init__(self, mask_flag=True, factor=1, scale=None, attention_dropout=0.1, output_attention=False):\n        super(AutoCorrelation, self).__init__()\n        self.factor = factor\n        self.scale = scale\n        self.mask_flag = mask_flag\n        self.output_attention = output_attention\n        self.dropout = nn.Dropout(attention_dropout)\n\n    def time_delay_agg_training(self, values, corr):\n        \"\"\"\n        SpeedUp version of Autocorrelation (a batch-normalization style design)\n        This is for the training phase.\n        \"\"\"\n        head = values.shape[1]\n        channel = values.shape[2]\n        length = values.shape[3]\n        # find top k\n        top_k = int(self.factor * math.log(length))\n        mean_value = torch.mean(torch.mean(corr, dim=1), dim=1)\n        index = torch.topk(torch.mean(mean_value, dim=0), top_k, dim=-1)[1]\n        weights = torch.stack([mean_value[:, index[i]] for i in range(top_k)], dim=-1)\n        # update corr\n        tmp_corr = torch.softmax(weights, dim=-1)\n        # aggregation\n        tmp_values = values\n        delays_agg = torch.zeros_like(values).float()\n        for i in range(top_k):\n            pattern = torch.roll(tmp_values, -int(index[i]), -1)\n            delays_agg = delays_agg + pattern * \\\n                         (tmp_corr[:, i].unsqueeze(1).unsqueeze(1).unsqueeze(1).repeat(1, head, channel, length))\n        return delays_agg\n\n    def time_delay_agg_inference(self, values, corr):\n        \"\"\"\n        
SpeedUp version of Autocorrelation (a batch-normalization style design)\n        This is for the inference phase.\n        \"\"\"\n        batch = values.shape[0]\n        head = values.shape[1]\n        channel = values.shape[2]\n        length = values.shape[3]\n        # index init\n        init_index = torch.arange(length).unsqueeze(0).unsqueeze(0).unsqueeze(0).repeat(batch, head, channel, 1).cuda()\n        # find top k\n        top_k = int(self.factor * math.log(length))\n        mean_value = torch.mean(torch.mean(corr, dim=1), dim=1)\n        weights, delay = torch.topk(mean_value, top_k, dim=-1)\n        # update corr\n        tmp_corr = torch.softmax(weights, dim=-1)\n        # aggregation\n        tmp_values = values.repeat(1, 1, 1, 2)\n        delays_agg = torch.zeros_like(values).float()\n        for i in range(top_k):\n            tmp_delay = init_index + delay[:, i].unsqueeze(1).unsqueeze(1).unsqueeze(1).repeat(1, head, channel, length)\n            pattern = torch.gather(tmp_values, dim=-1, index=tmp_delay)\n            delays_agg = delays_agg + pattern * \\\n                         (tmp_corr[:, i].unsqueeze(1).unsqueeze(1).unsqueeze(1).repeat(1, head, channel, length))\n        return delays_agg\n\n    def time_delay_agg_full(self, values, corr):\n        \"\"\"\n        Standard version of Autocorrelation\n        \"\"\"\n        batch = values.shape[0]\n        head = values.shape[1]\n        channel = values.shape[2]\n        length = values.shape[3]\n        # index init\n        init_index = torch.arange(length).unsqueeze(0).unsqueeze(0).unsqueeze(0).repeat(batch, head, channel, 1).cuda()\n        # find top k\n        top_k = int(self.factor * math.log(length))\n        weights, delay = torch.topk(corr, top_k, dim=-1)\n        # update corr\n        tmp_corr = torch.softmax(weights, dim=-1)\n        # aggregation\n        tmp_values = values.repeat(1, 1, 1, 2)\n        delays_agg = torch.zeros_like(values).float()\n        for i in 
range(top_k):\n            tmp_delay = init_index + delay[..., i].unsqueeze(-1)\n            pattern = torch.gather(tmp_values, dim=-1, index=tmp_delay)\n            delays_agg = delays_agg + pattern * (tmp_corr[..., i].unsqueeze(-1))\n        return delays_agg\n\n    def forward(self, queries, keys, values, attn_mask):\n        B, L, H, E = queries.shape\n        _, S, _, D = values.shape\n        if L > S:\n            zeros = torch.zeros_like(queries[:, :(L - S), :]).float()\n            values = torch.cat([values, zeros], dim=1)\n            keys = torch.cat([keys, zeros], dim=1)\n        else:\n            values = values[:, :L, :, :]\n            keys = keys[:, :L, :, :]\n\n        # period-based dependencies\n        q_fft = torch.fft.rfft(queries.permute(0, 2, 3, 1).contiguous(), dim=-1)\n        k_fft = torch.fft.rfft(keys.permute(0, 2, 3, 1).contiguous(), dim=-1)\n        res = q_fft * torch.conj(k_fft)\n        corr = torch.fft.irfft(res, dim=-1)\n\n        # time delay agg\n        if self.training:\n            V = self.time_delay_agg_training(values.permute(0, 2, 3, 1).contiguous(), corr).permute(0, 3, 1, 2)\n        else:\n            V = self.time_delay_agg_inference(values.permute(0, 2, 3, 1).contiguous(), corr).permute(0, 3, 1, 2)\n\n        if self.output_attention:\n            return (V.contiguous(), corr.permute(0, 3, 1, 2))\n        else:\n            return (V.contiguous(), None)\n\n\nclass AutoCorrelationLayer(nn.Module):\n    def __init__(self, correlation, d_model, n_heads, d_keys=None,\n                 d_values=None):\n        super(AutoCorrelationLayer, self).__init__()\n\n        d_keys = d_keys or (d_model // n_heads)\n        d_values = d_values or (d_model // n_heads)\n\n        self.inner_correlation = correlation\n        self.query_projection = nn.Linear(d_model, d_keys * n_heads)\n        self.key_projection = nn.Linear(d_model, d_keys * n_heads)\n        self.value_projection = nn.Linear(d_model, d_values * n_heads)\n        
self.out_projection = nn.Linear(d_values * n_heads, d_model)\n        self.n_heads = n_heads\n\n    def forward(self, queries, keys, values, attn_mask):\n        B, L, _ = queries.shape\n        _, S, _ = keys.shape\n        H = self.n_heads\n\n        queries = self.query_projection(queries).view(B, L, H, -1)\n        keys = self.key_projection(keys).view(B, S, H, -1)\n        values = self.value_projection(values).view(B, S, H, -1)\n\n        out, attn = self.inner_correlation(\n            queries,\n            keys,\n            values,\n            attn_mask\n        )\n        out = out.view(B, L, -1)\n\n        return self.out_projection(out), attn\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/layers/Autoformer_EncDec.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass my_Layernorm(nn.Module):\n    \"\"\"\n    Special designed layernorm for the seasonal part\n    \"\"\"\n\n    def __init__(self, channels):\n        super(my_Layernorm, self).__init__()\n        self.layernorm = nn.LayerNorm(channels)\n\n    def forward(self, x):\n        x_hat = self.layernorm(x)\n        bias = torch.mean(x_hat, dim=1).unsqueeze(1).repeat(1, x.shape[1], 1)\n        return x_hat - bias\n\n\nclass moving_avg(nn.Module):\n    \"\"\"\n    Moving average block to highlight the trend of time series\n    \"\"\"\n\n    def __init__(self, kernel_size, stride):\n        super(moving_avg, self).__init__()\n        self.kernel_size = kernel_size\n        self.avg = nn.AvgPool1d(kernel_size=kernel_size, stride=stride, padding=0)\n\n    def forward(self, x):\n        # padding on the both ends of time series\n        front = x[:, 0:1, :].repeat(1, (self.kernel_size - 1) // 2, 1)\n        end = x[:, -1:, :].repeat(1, (self.kernel_size - 1) // 2, 1)\n        x = torch.cat([front, x, end], dim=1)\n        x = self.avg(x.permute(0, 2, 1))\n        x = x.permute(0, 2, 1)\n        return x\n\n\nclass series_decomp(nn.Module):\n    \"\"\"\n    Series decomposition block\n    \"\"\"\n\n    def __init__(self, kernel_size):\n        super(series_decomp, self).__init__()\n        self.moving_avg = moving_avg(kernel_size, stride=1)\n\n    def forward(self, x):\n        moving_mean = self.moving_avg(x)\n        res = x - moving_mean\n        return res, moving_mean\n\n\nclass series_decomp_multi(nn.Module):\n    \"\"\"\n    Multiple Series decomposition block from FEDformer\n    \"\"\"\n\n    def __init__(self, kernel_size):\n        super(series_decomp_multi, self).__init__()\n        self.kernel_size = kernel_size\n        self.series_decomp = [series_decomp(kernel) for kernel in kernel_size]\n\n    def forward(self, x):\n        moving_mean = []\n        res = []\n        for 
func in self.series_decomp:\n            sea, moving_avg = func(x)\n            moving_mean.append(moving_avg)\n            res.append(sea)\n\n        sea = sum(res) / len(res)\n        moving_mean = sum(moving_mean) / len(moving_mean)\n        return sea, moving_mean\n\n\nclass EncoderLayer(nn.Module):\n    \"\"\"\n    Autoformer encoder layer with the progressive decomposition architecture\n    \"\"\"\n\n    def __init__(self, attention, d_model, d_ff=None, moving_avg=25, dropout=0.1, activation=\"relu\"):\n        super(EncoderLayer, self).__init__()\n        d_ff = d_ff or 4 * d_model\n        self.attention = attention\n        self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1, bias=False)\n        self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1, bias=False)\n        self.decomp1 = series_decomp(moving_avg)\n        self.decomp2 = series_decomp(moving_avg)\n        self.dropout = nn.Dropout(dropout)\n        self.activation = F.relu if activation == \"relu\" else F.gelu\n\n    def forward(self, x, attn_mask=None):\n        new_x, attn = self.attention(\n            x, x, x,\n            attn_mask=attn_mask\n        )\n        x = x + self.dropout(new_x)\n        x, _ = self.decomp1(x)\n        y = x\n        y = self.dropout(self.activation(self.conv1(y.transpose(-1, 1))))\n        y = self.dropout(self.conv2(y).transpose(-1, 1))\n        res, _ = self.decomp2(x + y)\n        return res, attn\n\n\nclass Encoder(nn.Module):\n    \"\"\"\n    Autoformer encoder\n    \"\"\"\n\n    def __init__(self, attn_layers, conv_layers=None, norm_layer=None):\n        super(Encoder, self).__init__()\n        self.attn_layers = nn.ModuleList(attn_layers)\n        self.conv_layers = nn.ModuleList(conv_layers) if conv_layers is not None else None\n        self.norm = norm_layer\n\n    def forward(self, x, attn_mask=None):\n        attns = []\n        if self.conv_layers is not None:\n            for attn_layer, conv_layer 
in zip(self.attn_layers, self.conv_layers):\n                x, attn = attn_layer(x, attn_mask=attn_mask)\n                x = conv_layer(x)\n                attns.append(attn)\n            x, attn = self.attn_layers[-1](x)\n            attns.append(attn)\n        else:\n            for attn_layer in self.attn_layers:\n                x, attn = attn_layer(x, attn_mask=attn_mask)\n                attns.append(attn)\n\n        if self.norm is not None:\n            x = self.norm(x)\n\n        return x, attns\n\n\nclass DecoderLayer(nn.Module):\n    \"\"\"\n    Autoformer decoder layer with the progressive decomposition architecture\n    \"\"\"\n\n    def __init__(self, self_attention, cross_attention, d_model, c_out, d_ff=None,\n                 moving_avg=25, dropout=0.1, activation=\"relu\"):\n        super(DecoderLayer, self).__init__()\n        d_ff = d_ff or 4 * d_model\n        self.self_attention = self_attention\n        self.cross_attention = cross_attention\n        self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1, bias=False)\n        self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1, bias=False)\n        self.decomp1 = series_decomp(moving_avg)\n        self.decomp2 = series_decomp(moving_avg)\n        self.decomp3 = series_decomp(moving_avg)\n        self.dropout = nn.Dropout(dropout)\n        self.projection = nn.Conv1d(in_channels=d_model, out_channels=c_out, kernel_size=3, stride=1, padding=1,\n                                    padding_mode='circular', bias=False)\n        self.activation = F.relu if activation == \"relu\" else F.gelu\n\n    def forward(self, x, cross, x_mask=None, cross_mask=None):\n        x = x + self.dropout(self.self_attention(\n            x, x, x,\n            attn_mask=x_mask\n        )[0])\n        x, trend1 = self.decomp1(x)\n        x = x + self.dropout(self.cross_attention(\n            x, cross, cross,\n            attn_mask=cross_mask\n        )[0])\n        x, 
trend2 = self.decomp2(x)\n        y = x\n        y = self.dropout(self.activation(self.conv1(y.transpose(-1, 1))))\n        y = self.dropout(self.conv2(y).transpose(-1, 1))\n        x, trend3 = self.decomp3(x + y)\n\n        residual_trend = trend1 + trend2 + trend3\n        residual_trend = self.projection(residual_trend.permute(0, 2, 1)).transpose(1, 2)\n        return x, residual_trend\n\n\nclass Decoder(nn.Module):\n    \"\"\"\n    Autoformer encoder\n    \"\"\"\n\n    def __init__(self, layers, norm_layer=None, projection=None):\n        super(Decoder, self).__init__()\n        self.layers = nn.ModuleList(layers)\n        self.norm = norm_layer\n        self.projection = projection\n\n    def forward(self, x, cross, x_mask=None, cross_mask=None, trend=None):\n        for layer in self.layers:\n            x, residual_trend = layer(x, cross, x_mask=x_mask, cross_mask=cross_mask)\n            trend = trend + residual_trend\n\n        if self.norm is not None:\n            x = self.norm(x)\n\n        if self.projection is not None:\n            x = self.projection(x)\n        return x, trend\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/layers/Conv_Blocks.py",
    "content": "import torch\nimport torch.nn as nn\n\n\nclass Inception_Block_V1(nn.Module):\n    def __init__(self, in_channels, out_channels, num_kernels=6, init_weight=True):\n        super(Inception_Block_V1, self).__init__()\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n        self.num_kernels = num_kernels\n        kernels = []\n        for i in range(self.num_kernels):\n            kernels.append(nn.Conv2d(in_channels, out_channels, kernel_size=2 * i + 1, padding=i))\n        self.kernels = nn.ModuleList(kernels)\n        if init_weight:\n            self._initialize_weights()\n\n    def _initialize_weights(self):\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n                if m.bias is not None:\n                    nn.init.constant_(m.bias, 0)\n\n    def forward(self, x):\n        res_list = []\n        for i in range(self.num_kernels):\n            res_list.append(self.kernels[i](x))\n        res = torch.stack(res_list, dim=-1).mean(-1)\n        return res\n\n\nclass Inception_Block_V2(nn.Module):\n    def __init__(self, in_channels, out_channels, num_kernels=6, init_weight=True):\n        super(Inception_Block_V2, self).__init__()\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n        self.num_kernels = num_kernels\n        kernels = []\n        for i in range(self.num_kernels // 2):\n            kernels.append(nn.Conv2d(in_channels, out_channels, kernel_size=[1, 2 * i + 3], padding=[0, i + 1]))\n            kernels.append(nn.Conv2d(in_channels, out_channels, kernel_size=[2 * i + 3, 1], padding=[i + 1, 0]))\n        kernels.append(nn.Conv2d(in_channels, out_channels, kernel_size=1))\n        self.kernels = nn.ModuleList(kernels)\n        if init_weight:\n            self._initialize_weights()\n\n    def _initialize_weights(self):\n        for m in 
self.modules():\n            if isinstance(m, nn.Conv2d):\n                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n                if m.bias is not None:\n                    nn.init.constant_(m.bias, 0)\n\n    def forward(self, x):\n        res_list = []\n        for i in range(self.num_kernels + 1):\n            res_list.append(self.kernels[i](x))\n        res = torch.stack(res_list, dim=-1).mean(-1)\n        return res\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/layers/Embed.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.utils import weight_norm\nimport math\n\n\nclass PositionalEmbedding(nn.Module):\n    def __init__(self, d_model, max_len=5000):\n        super(PositionalEmbedding, self).__init__()\n        # Compute the positional encodings once in log space.\n        pe = torch.zeros(max_len, d_model).float()\n        pe.require_grad = False\n\n        position = torch.arange(0, max_len).float().unsqueeze(1)\n        div_term = (torch.arange(0, d_model, 2).float()\n                    * -(math.log(10000.0) / d_model)).exp()\n\n        pe[:, 0::2] = torch.sin(position * div_term)\n        pe[:, 1::2] = torch.cos(position * div_term)\n\n        pe = pe.unsqueeze(0)\n        self.register_buffer('pe', pe)\n\n    def forward(self, x):\n        return self.pe[:, :x.size(1)]\n\n\nclass TokenEmbedding(nn.Module):\n    def __init__(self, c_in, d_model):\n        super(TokenEmbedding, self).__init__()\n        padding = 1 if torch.__version__ >= '1.5.0' else 2\n        self.tokenConv = nn.Conv1d(in_channels=c_in, out_channels=d_model,\n                                   kernel_size=3, padding=padding, padding_mode='circular', bias=False)\n        for m in self.modules():\n            if isinstance(m, nn.Conv1d):\n                nn.init.kaiming_normal_(\n                    m.weight, mode='fan_in', nonlinearity='leaky_relu')\n\n    def forward(self, x):\n        x = self.tokenConv(x.permute(0, 2, 1)).transpose(1, 2)\n        return x\n\n\nclass FixedEmbedding(nn.Module):\n    def __init__(self, c_in, d_model):\n        super(FixedEmbedding, self).__init__()\n\n        w = torch.zeros(c_in, d_model).float()\n        w.require_grad = False\n\n        position = torch.arange(0, c_in).float().unsqueeze(1)\n        div_term = (torch.arange(0, d_model, 2).float()\n                    * -(math.log(10000.0) / d_model)).exp()\n\n        w[:, 0::2] = torch.sin(position * div_term)\n        w[:, 
1::2] = torch.cos(position * div_term)\n\n        self.emb = nn.Embedding(c_in, d_model)\n        self.emb.weight = nn.Parameter(w, requires_grad=False)\n\n    def forward(self, x):\n        return self.emb(x).detach()\n\n\nclass TemporalEmbedding(nn.Module):\n    def __init__(self, d_model, embed_type='fixed', freq='h'):\n        super(TemporalEmbedding, self).__init__()\n\n        minute_size = 4\n        hour_size = 24\n        weekday_size = 7\n        day_size = 32\n        month_size = 13\n\n        Embed = FixedEmbedding if embed_type == 'fixed' else nn.Embedding\n        if freq == 't':\n            self.minute_embed = Embed(minute_size, d_model)\n        self.hour_embed = Embed(hour_size, d_model)\n        self.weekday_embed = Embed(weekday_size, d_model)\n        self.day_embed = Embed(day_size, d_model)\n        self.month_embed = Embed(month_size, d_model)\n\n    def forward(self, x):\n        x = x.long()\n        minute_x = self.minute_embed(x[:, :, 4]) if hasattr(\n            self, 'minute_embed') else 0.\n        hour_x = self.hour_embed(x[:, :, 3])\n        weekday_x = self.weekday_embed(x[:, :, 2])\n        day_x = self.day_embed(x[:, :, 1])\n        month_x = self.month_embed(x[:, :, 0])\n\n        return hour_x + weekday_x + day_x + month_x + minute_x\n\n\nclass TimeFeatureEmbedding(nn.Module):\n    def __init__(self, d_model, embed_type='timeF', freq='h'):\n        super(TimeFeatureEmbedding, self).__init__()\n\n        freq_map = {'h': 4, 't': 5, 's': 6,\n                    'm': 1, 'a': 1, 'w': 2, 'd': 3, 'b': 3}\n        d_inp = freq_map[freq]\n        self.embed = nn.Linear(d_inp, d_model, bias=False)\n\n    def forward(self, x):\n        return self.embed(x)\n\n\nclass DataEmbedding(nn.Module):\n    def __init__(self, c_in, d_model, embed_type='fixed', freq='h', dropout=0.1):\n        super(DataEmbedding, self).__init__()\n\n        self.value_embedding = TokenEmbedding(c_in=c_in, d_model=d_model)\n        self.position_embedding = 
PositionalEmbedding(d_model=d_model)\n        self.temporal_embedding = TemporalEmbedding(d_model=d_model, embed_type=embed_type,\n                                                    freq=freq) if embed_type != 'timeF' else TimeFeatureEmbedding(\n            d_model=d_model, embed_type=embed_type, freq=freq)\n        self.dropout = nn.Dropout(p=dropout)\n\n    def forward(self, x, x_mark):\n        if x_mark is None:\n            x = self.value_embedding(x) + self.position_embedding(x)\n        else:\n            x = self.value_embedding(\n                x) + self.temporal_embedding(x_mark) + self.position_embedding(x)\n        return self.dropout(x)\n\n\nclass DataEmbedding_inverted(nn.Module):\n    def __init__(self, c_in, d_model, embed_type='fixed', freq='h', dropout=0.1):\n        super(DataEmbedding_inverted, self).__init__()\n        self.value_embedding = nn.Linear(c_in, d_model)\n        self.dropout = nn.Dropout(p=dropout)\n\n    def forward(self, x, x_mark):\n        x = x.permute(0, 2, 1)\n        # x: [Batch Variate Time]\n        if x_mark is None:\n            x = self.value_embedding(x)\n        else:\n            x = self.value_embedding(torch.cat([x, x_mark.permute(0, 2, 1)], 1))\n        # x: [Batch Variate d_model]\n        return self.dropout(x)\n\n\nclass DataEmbedding_wo_pos(nn.Module):\n    def __init__(self, c_in, d_model, embed_type='fixed', freq='h', dropout=0.1):\n        super(DataEmbedding_wo_pos, self).__init__()\n\n        self.value_embedding = TokenEmbedding(c_in=c_in, d_model=d_model)\n        self.position_embedding = PositionalEmbedding(d_model=d_model)\n        self.temporal_embedding = TemporalEmbedding(d_model=d_model, embed_type=embed_type,\n                                                    freq=freq) if embed_type != 'timeF' else TimeFeatureEmbedding(\n            d_model=d_model, embed_type=embed_type, freq=freq)\n        self.dropout = nn.Dropout(p=dropout)\n\n    def forward(self, x, x_mark):\n        if x_mark is 
None:\n            x = self.value_embedding(x)\n        else:\n            x = self.value_embedding(x) + self.temporal_embedding(x_mark)\n        return self.dropout(x)\n\n\nclass PatchEmbedding(nn.Module):\n    def __init__(self, d_model, patch_len, stride, padding, dropout):\n        super(PatchEmbedding, self).__init__()\n        # Patching\n        self.patch_len = patch_len\n        self.stride = stride\n        self.padding_patch_layer = nn.ReplicationPad1d((0, padding))\n\n        # Backbone, Input encoding: projection of feature vectors onto a d-dim vector space\n        self.value_embedding = nn.Linear(patch_len, d_model, bias=False)\n\n        # Positional embedding\n        self.position_embedding = PositionalEmbedding(d_model)\n\n        # Residual dropout\n        self.dropout = nn.Dropout(dropout)\n\n    def forward(self, x):\n        # do patching\n        n_vars = x.shape[1]\n        x = self.padding_patch_layer(x)\n        x = x.unfold(dimension=-1, size=self.patch_len, step=self.stride)\n        x = torch.reshape(x, (x.shape[0] * x.shape[1], x.shape[2], x.shape[3]))\n        # Input encoding\n        x = self.value_embedding(x) + self.position_embedding(x)\n        return self.dropout(x), n_vars\n\n\nclass DataEmbedding_wo_time(nn.Module):\n    def __init__(self, c_in, d_model, embed_type='fixed', freq='h', dropout=0.1):\n        super(DataEmbedding_wo_time, self).__init__()\n\n        self.value_embedding = TokenEmbedding(c_in=c_in, d_model=d_model)\n        self.position_embedding = PositionalEmbedding(d_model=d_model)\n        self.dropout = nn.Dropout(p=dropout)\n\n    def forward(self, x):\n        x = self.value_embedding(x) + self.position_embedding(x)\n        return self.dropout(x)"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/layers/PatchTST_backbone.py",
    "content": "# Cell\nfrom typing import Callable, Optional\nimport torch\nfrom torch import nn\nfrom torch import Tensor\nimport torch.nn.functional as F\nimport numpy as np\n\n#from collections import OrderedDict\nfrom Other_baselines.layers.PatchTST_layers import *\nfrom Other_baselines.layers.RevIN import RevIN\n\n# Cell\nclass PatchTST_backbone(nn.Module):\n    def __init__(self, c_in:int, context_window:int, target_window:int, patch_len:int, stride:int, max_seq_len:Optional[int]=1024, \n                 n_layers:int=3, d_model=128, n_heads=16, d_k:Optional[int]=None, d_v:Optional[int]=None,\n                 d_ff:int=256, norm:str='BatchNorm', attn_dropout:float=0., dropout:float=0., act:str=\"gelu\", key_padding_mask:bool='auto',\n                 padding_var:Optional[int]=None, attn_mask:Optional[Tensor]=None, res_attention:bool=True, pre_norm:bool=False, store_attn:bool=False,\n                 pe:str='zeros', learn_pe:bool=True, fc_dropout:float=0., head_dropout = 0, padding_patch = None,\n                 pretrain_head:bool=False, head_type = 'flatten', individual = False, revin = True, affine = True, subtract_last = False,\n                 verbose:bool=False, **kwargs):\n        \n        super().__init__()\n        \n        # RevIn\n        self.revin = revin\n        if self.revin: self.revin_layer = RevIN(c_in, affine=affine, subtract_last=subtract_last)\n        \n        # Patching\n        self.patch_len = patch_len\n        self.stride = stride\n        self.padding_patch = padding_patch\n        patch_num = int((context_window - patch_len)/stride + 1)\n        if padding_patch == 'end': # can be modified to general case\n            self.padding_patch_layer = nn.ReplicationPad1d((0, stride)) \n            patch_num += 1\n        \n        # Backbone \n        self.backbone = TSTiEncoder(c_in, patch_num=patch_num, patch_len=patch_len, max_seq_len=max_seq_len,\n                                n_layers=n_layers, d_model=d_model, 
n_heads=n_heads, d_k=d_k, d_v=d_v, d_ff=d_ff,\n                                attn_dropout=attn_dropout, dropout=dropout, act=act, key_padding_mask=key_padding_mask, padding_var=padding_var,\n                                attn_mask=attn_mask, res_attention=res_attention, pre_norm=pre_norm, store_attn=store_attn,\n                                pe=pe, learn_pe=learn_pe, verbose=verbose, **kwargs)\n\n        # Head\n        self.head_nf = d_model * patch_num\n        self.n_vars = c_in\n        self.pretrain_head = pretrain_head\n        self.head_type = head_type\n        self.individual = individual\n\n        if self.pretrain_head: \n            self.head = self.create_pretrain_head(self.head_nf, c_in, fc_dropout) # custom head passed as a partial func with all its kwargs\n        elif head_type == 'flatten': \n            self.head = Flatten_Head(self.individual, self.n_vars, self.head_nf, target_window, head_dropout=head_dropout)\n        \n    \n    def forward(self, z):                                                                   # z: [bs x nvars x seq_len]\n        # norm\n        if self.revin: \n            z = z.permute(0,2,1)\n            z = self.revin_layer(z, 'norm')\n            z = z.permute(0,2,1)\n            \n        # do patching\n        if self.padding_patch == 'end':\n            z = self.padding_patch_layer(z)\n        z = z.unfold(dimension=-1, size=self.patch_len, step=self.stride)                   # z: [bs x nvars x patch_num x patch_len]\n        z = z.permute(0,1,3,2)                                                              # z: [bs x nvars x patch_len x patch_num]\n        \n        # model\n        z = self.backbone(z)                                                                # z: [bs x nvars x d_model x patch_num]\n        z = self.head(z)                                                                    # z: [bs x nvars x target_window] \n        \n        # denorm\n        if self.revin: \n            z = 
z.permute(0,2,1)\n            z = self.revin_layer(z, 'denorm')\n            z = z.permute(0,2,1)\n        return z\n    \n    def create_pretrain_head(self, head_nf, vars, dropout):\n        return nn.Sequential(nn.Dropout(dropout),\n                    nn.Conv1d(head_nf, vars, 1)\n                    )\n\n\nclass Flatten_Head(nn.Module):\n    def __init__(self, individual, n_vars, nf, target_window, head_dropout=0):\n        super().__init__()\n        \n        self.individual = individual\n        self.n_vars = n_vars\n        \n        if self.individual:\n            self.linears = nn.ModuleList()\n            self.dropouts = nn.ModuleList()\n            self.flattens = nn.ModuleList()\n            for i in range(self.n_vars):\n                self.flattens.append(nn.Flatten(start_dim=-2))\n                self.linears.append(nn.Linear(nf, target_window))\n                self.dropouts.append(nn.Dropout(head_dropout))\n        else:\n            self.flatten = nn.Flatten(start_dim=-2)\n            self.linear = nn.Linear(nf, target_window)\n            self.dropout = nn.Dropout(head_dropout)\n            \n    def forward(self, x):                                 # x: [bs x nvars x d_model x patch_num]\n        if self.individual:\n            x_out = []\n            for i in range(self.n_vars):\n                z = self.flattens[i](x[:,i,:,:])          # z: [bs x d_model * patch_num]\n                z = self.linears[i](z)                    # z: [bs x target_window]\n                z = self.dropouts[i](z)\n                x_out.append(z)\n            x = torch.stack(x_out, dim=1)                 # x: [bs x nvars x target_window]\n        else:\n            x = self.flatten(x)\n            x = self.linear(x)\n            x = self.dropout(x)\n        return x\n        \n        \n    \n    \nclass TSTiEncoder(nn.Module):  #i means channel-independent\n    def __init__(self, c_in, patch_num, patch_len, max_seq_len=1024,\n                 n_layers=3, 
d_model=128, n_heads=16, d_k=None, d_v=None,\n                 d_ff=256, norm='BatchNorm', attn_dropout=0., dropout=0., act=\"gelu\", store_attn=False,\n                 key_padding_mask='auto', padding_var=None, attn_mask=None, res_attention=True, pre_norm=False,\n                 pe='zeros', learn_pe=True, verbose=False, **kwargs):\n        \n        \n        super().__init__()\n        \n        self.patch_num = patch_num\n        self.patch_len = patch_len\n        \n        # Input encoding\n        q_len = patch_num\n        self.W_P = nn.Linear(patch_len, d_model)        # Eq 1: projection of feature vectors onto a d-dim vector space\n        self.seq_len = q_len\n\n        # Positional encoding\n        self.W_pos = positional_encoding(pe, learn_pe, q_len, d_model)\n\n        # Residual dropout\n        self.dropout = nn.Dropout(dropout)\n\n        # Encoder\n        self.encoder = TSTEncoder(q_len, d_model, n_heads, d_k=d_k, d_v=d_v, d_ff=d_ff, norm=norm, attn_dropout=attn_dropout, dropout=dropout,\n                                   pre_norm=pre_norm, activation=act, res_attention=res_attention, n_layers=n_layers, store_attn=store_attn)\n\n        \n    def forward(self, x) -> Tensor:                                              # x: [bs x nvars x patch_len x patch_num]\n        \n        n_vars = x.shape[1]\n        # Input encoding\n        x = x.permute(0,1,3,2)                                                   # x: [bs x nvars x patch_num x patch_len]\n        x = self.W_P(x)                                                          # x: [bs x nvars x patch_num x d_model]\n\n        u = torch.reshape(x, (x.shape[0]*x.shape[1],x.shape[2],x.shape[3]))      # u: [bs * nvars x patch_num x d_model]\n        u = self.dropout(u + self.W_pos)                                         # u: [bs * nvars x patch_num x d_model]\n\n        # Encoder\n        z = self.encoder(u)                                                      # z: [bs * nvars x patch_num x 
d_model]\n        z = torch.reshape(z, (-1,n_vars,z.shape[-2],z.shape[-1]))                # z: [bs x nvars x patch_num x d_model]\n        z = z.permute(0,1,3,2)                                                   # z: [bs x nvars x d_model x patch_num]\n        \n        return z    \n            \n            \n    \n# Cell\nclass TSTEncoder(nn.Module):\n    def __init__(self, q_len, d_model, n_heads, d_k=None, d_v=None, d_ff=None, \n                        norm='BatchNorm', attn_dropout=0., dropout=0., activation='gelu',\n                        res_attention=False, n_layers=1, pre_norm=False, store_attn=False):\n        super().__init__()\n\n        self.layers = nn.ModuleList([TSTEncoderLayer(q_len, d_model, n_heads=n_heads, d_k=d_k, d_v=d_v, d_ff=d_ff, norm=norm,\n                                                      attn_dropout=attn_dropout, dropout=dropout,\n                                                      activation=activation, res_attention=res_attention,\n                                                      pre_norm=pre_norm, store_attn=store_attn) for i in range(n_layers)])\n        self.res_attention = res_attention\n\n    def forward(self, src:Tensor, key_padding_mask:Optional[Tensor]=None, attn_mask:Optional[Tensor]=None):\n        output = src\n        scores = None\n        if self.res_attention:\n            for mod in self.layers: output, scores = mod(output, prev=scores, key_padding_mask=key_padding_mask, attn_mask=attn_mask)\n            return output\n        else:\n            for mod in self.layers: output = mod(output, key_padding_mask=key_padding_mask, attn_mask=attn_mask)\n            return output\n\n\n\nclass TSTEncoderLayer(nn.Module):\n    def __init__(self, q_len, d_model, n_heads, d_k=None, d_v=None, d_ff=256, store_attn=False,\n                 norm='BatchNorm', attn_dropout=0, dropout=0., bias=True, activation=\"gelu\", res_attention=False, pre_norm=False):\n        super().__init__()\n        assert not d_model%n_heads, 
f\"d_model ({d_model}) must be divisible by n_heads ({n_heads})\"\n        d_k = d_model // n_heads if d_k is None else d_k\n        d_v = d_model // n_heads if d_v is None else d_v\n\n        # Multi-Head attention\n        self.res_attention = res_attention\n        self.self_attn = _MultiheadAttention(d_model, n_heads, d_k, d_v, attn_dropout=attn_dropout, proj_dropout=dropout, res_attention=res_attention)\n\n        # Add & Norm\n        self.dropout_attn = nn.Dropout(dropout)\n        if \"batch\" in norm.lower():\n            self.norm_attn = nn.Sequential(Transpose(1,2), nn.BatchNorm1d(d_model), Transpose(1,2))\n        else:\n            self.norm_attn = nn.LayerNorm(d_model)\n\n        # Position-wise Feed-Forward\n        self.ff = nn.Sequential(nn.Linear(d_model, d_ff, bias=bias),\n                                get_activation_fn(activation),\n                                nn.Dropout(dropout),\n                                nn.Linear(d_ff, d_model, bias=bias))\n\n        # Add & Norm\n        self.dropout_ffn = nn.Dropout(dropout)\n        if \"batch\" in norm.lower():\n            self.norm_ffn = nn.Sequential(Transpose(1,2), nn.BatchNorm1d(d_model), Transpose(1,2))\n        else:\n            self.norm_ffn = nn.LayerNorm(d_model)\n\n        self.pre_norm = pre_norm\n        self.store_attn = store_attn\n\n\n    def forward(self, src:Tensor, prev:Optional[Tensor]=None, key_padding_mask:Optional[Tensor]=None, attn_mask:Optional[Tensor]=None) -> Tensor:\n\n        # Multi-Head attention sublayer\n        if self.pre_norm:\n            src = self.norm_attn(src)\n        ## Multi-Head attention\n        if self.res_attention:\n            src2, attn, scores = self.self_attn(src, src, src, prev, key_padding_mask=key_padding_mask, attn_mask=attn_mask)\n        else:\n            src2, attn = self.self_attn(src, src, src, key_padding_mask=key_padding_mask, attn_mask=attn_mask)\n        if self.store_attn:\n            self.attn = attn\n        ## Add & 
Norm\n        src = src + self.dropout_attn(src2) # Add: residual connection with residual dropout\n        if not self.pre_norm:\n            src = self.norm_attn(src)\n\n        # Feed-forward sublayer\n        if self.pre_norm:\n            src = self.norm_ffn(src)\n        ## Position-wise Feed-Forward\n        src2 = self.ff(src)\n        ## Add & Norm\n        src = src + self.dropout_ffn(src2) # Add: residual connection with residual dropout\n        if not self.pre_norm:\n            src = self.norm_ffn(src)\n\n        if self.res_attention:\n            return src, scores\n        else:\n            return src\n\n\n\n\nclass _MultiheadAttention(nn.Module):\n    def __init__(self, d_model, n_heads, d_k=None, d_v=None, res_attention=False, attn_dropout=0., proj_dropout=0., qkv_bias=True, lsa=False):\n        \"\"\"Multi Head Attention Layer\n        Input shape:\n            Q:       [batch_size (bs) x max_q_len x d_model]\n            K, V:    [batch_size (bs) x q_len x d_model]\n            mask:    [q_len x q_len]\n        \"\"\"\n        super().__init__()\n        d_k = d_model // n_heads if d_k is None else d_k\n        d_v = d_model // n_heads if d_v is None else d_v\n\n        self.n_heads, self.d_k, self.d_v = n_heads, d_k, d_v\n\n        self.W_Q = nn.Linear(d_model, d_k * n_heads, bias=qkv_bias)\n        self.W_K = nn.Linear(d_model, d_k * n_heads, bias=qkv_bias)\n        self.W_V = nn.Linear(d_model, d_v * n_heads, bias=qkv_bias)\n\n        # Scaled Dot-Product Attention (multiple heads)\n        self.res_attention = res_attention\n        self.sdp_attn = _ScaledDotProductAttention(d_model, n_heads, attn_dropout=attn_dropout, res_attention=self.res_attention, lsa=lsa)\n\n        # Poject output\n        self.to_out = nn.Sequential(nn.Linear(n_heads * d_v, d_model), nn.Dropout(proj_dropout))\n\n\n    def forward(self, Q:Tensor, K:Optional[Tensor]=None, V:Optional[Tensor]=None, prev:Optional[Tensor]=None,\n                
key_padding_mask:Optional[Tensor]=None, attn_mask:Optional[Tensor]=None):\n\n        bs = Q.size(0)\n        if K is None: K = Q\n        if V is None: V = Q\n\n        # Linear (+ split in multiple heads)\n        q_s = self.W_Q(Q).view(bs, -1, self.n_heads, self.d_k).transpose(1,2)       # q_s    : [bs x n_heads x max_q_len x d_k]\n        k_s = self.W_K(K).view(bs, -1, self.n_heads, self.d_k).permute(0,2,3,1)     # k_s    : [bs x n_heads x d_k x q_len] - transpose(1,2) + transpose(2,3)\n        v_s = self.W_V(V).view(bs, -1, self.n_heads, self.d_v).transpose(1,2)       # v_s    : [bs x n_heads x q_len x d_v]\n\n        # Apply Scaled Dot-Product Attention (multiple heads)\n        if self.res_attention:\n            output, attn_weights, attn_scores = self.sdp_attn(q_s, k_s, v_s, prev=prev, key_padding_mask=key_padding_mask, attn_mask=attn_mask)\n        else:\n            output, attn_weights = self.sdp_attn(q_s, k_s, v_s, key_padding_mask=key_padding_mask, attn_mask=attn_mask)\n        # output: [bs x n_heads x q_len x d_v], attn: [bs x n_heads x q_len x q_len], scores: [bs x n_heads x max_q_len x q_len]\n\n        # back to the original inputs dimensions\n        output = output.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * self.d_v) # output: [bs x q_len x n_heads * d_v]\n        output = self.to_out(output)\n\n        if self.res_attention: return output, attn_weights, attn_scores\n        else: return output, attn_weights\n\n\nclass _ScaledDotProductAttention(nn.Module):\n    r\"\"\"Scaled Dot-Product Attention module (Attention is all you need by Vaswani et al., 2017) with optional residual attention from previous layer\n    (Realformer: Transformer likes residual attention by He et al, 2020) and locality self sttention (Vision Transformer for Small-Size Datasets\n    by Lee et al, 2021)\"\"\"\n\n    def __init__(self, d_model, n_heads, attn_dropout=0., res_attention=False, lsa=False):\n        super().__init__()\n        self.attn_dropout = 
nn.Dropout(attn_dropout)\n        self.res_attention = res_attention\n        head_dim = d_model // n_heads\n        self.scale = nn.Parameter(torch.tensor(head_dim ** -0.5), requires_grad=lsa)\n        self.lsa = lsa\n\n    def forward(self, q:Tensor, k:Tensor, v:Tensor, prev:Optional[Tensor]=None, key_padding_mask:Optional[Tensor]=None, attn_mask:Optional[Tensor]=None):\n        '''\n        Input shape:\n            q               : [bs x n_heads x max_q_len x d_k]\n            k               : [bs x n_heads x d_k x seq_len]\n            v               : [bs x n_heads x seq_len x d_v]\n            prev            : [bs x n_heads x q_len x seq_len]\n            key_padding_mask: [bs x seq_len]\n            attn_mask       : [1 x seq_len x seq_len]\n        Output shape:\n            output:  [bs x n_heads x q_len x d_v]\n            attn   : [bs x n_heads x q_len x seq_len]\n            scores : [bs x n_heads x q_len x seq_len]\n        '''\n\n        # Scaled MatMul (q, k) - similarity scores for all pairs of positions in an input sequence\n        attn_scores = torch.matmul(q, k) * self.scale      # attn_scores : [bs x n_heads x max_q_len x q_len]\n\n        # Add pre-softmax attention scores from the previous layer (optional)\n        if prev is not None: attn_scores = attn_scores + prev\n\n        # Attention mask (optional)\n        if attn_mask is not None:                                     # attn_mask with shape [q_len x seq_len] - only used when q_len == seq_len\n            if attn_mask.dtype == torch.bool:\n                attn_scores.masked_fill_(attn_mask, -np.inf)\n            else:\n                attn_scores += attn_mask\n\n        # Key padding mask (optional)\n        if key_padding_mask is not None:                              # mask with shape [bs x q_len] (only when max_w_len == q_len)\n            attn_scores.masked_fill_(key_padding_mask.unsqueeze(1).unsqueeze(2), -np.inf)\n\n        # normalize the attention weights\n        
attn_weights = F.softmax(attn_scores, dim=-1)                 # attn_weights   : [bs x n_heads x max_q_len x q_len]\n        attn_weights = self.attn_dropout(attn_weights)\n\n        # compute the new values given the attention weights\n        output = torch.matmul(attn_weights, v)                        # output: [bs x n_heads x max_q_len x d_v]\n\n        if self.res_attention: return output, attn_weights, attn_scores\n        else: return output, attn_weights\n\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/layers/PatchTST_layers.py",
    "content": "import torch\nfrom torch import nn\nimport math\n\nclass Transpose(nn.Module):\n    def __init__(self, *dims, contiguous=False): \n        super().__init__()\n        self.dims, self.contiguous = dims, contiguous\n    def forward(self, x):\n        if self.contiguous: return x.transpose(*self.dims).contiguous()\n        else: return x.transpose(*self.dims)\n\n    \ndef get_activation_fn(activation):\n    if callable(activation): return activation()\n    elif activation.lower() == \"relu\": return nn.ReLU()\n    elif activation.lower() == \"gelu\": return nn.GELU()\n    raise ValueError(f'{activation} is not available. You can use \"relu\", \"gelu\", or a callable') \n    \n    \n# decomposition\n\nclass moving_avg(nn.Module):\n    \"\"\"\n    Moving average block to highlight the trend of time series\n    \"\"\"\n    def __init__(self, kernel_size, stride):\n        super(moving_avg, self).__init__()\n        self.kernel_size = kernel_size\n        self.avg = nn.AvgPool1d(kernel_size=kernel_size, stride=stride, padding=0)\n\n    def forward(self, x):\n        # padding on the both ends of time series\n        front = x[:, 0:1, :].repeat(1, (self.kernel_size - 1) // 2, 1)\n        end = x[:, -1:, :].repeat(1, (self.kernel_size - 1) // 2, 1)\n        x = torch.cat([front, x, end], dim=1)\n        x = self.avg(x.permute(0, 2, 1))\n        x = x.permute(0, 2, 1)\n        return x\n\n\nclass series_decomp(nn.Module):\n    \"\"\"\n    Series decomposition block\n    \"\"\"\n    def __init__(self, kernel_size):\n        super(series_decomp, self).__init__()\n        self.moving_avg = moving_avg(kernel_size, stride=1)\n\n    def forward(self, x):\n        moving_mean = self.moving_avg(x)\n        res = x - moving_mean\n        return res, moving_mean\n    \n    \n    \n# pos_encoding\n\ndef PositionalEncoding(q_len, d_model, normalize=True):\n    pe = torch.zeros(q_len, d_model)\n    position = torch.arange(0, q_len).unsqueeze(1)\n    div_term = 
torch.exp(torch.arange(0, d_model, 2) * -(math.log(10000.0) / d_model))\n    pe[:, 0::2] = torch.sin(position * div_term)\n    pe[:, 1::2] = torch.cos(position * div_term)\n    if normalize:\n        pe = pe - pe.mean()\n        pe = pe / (pe.std() * 10)\n    return pe\n\nSinCosPosEncoding = PositionalEncoding\n\ndef Coord2dPosEncoding(q_len, d_model, exponential=False, normalize=True, eps=1e-3, verbose=False):\n    x = .5 if exponential else 1\n    i = 0\n    for i in range(100):\n        cpe = 2 * (torch.linspace(0, 1, q_len).reshape(-1, 1) ** x) * (torch.linspace(0, 1, d_model).reshape(1, -1) ** x) - 1\n        pv(f'{i:4.0f}  {x:5.3f}  {cpe.mean():+6.3f}', verbose)\n        if abs(cpe.mean()) <= eps: break\n        elif cpe.mean() > eps: x += .001\n        else: x -= .001\n        i += 1\n    if normalize:\n        cpe = cpe - cpe.mean()\n        cpe = cpe / (cpe.std() * 10)\n    return cpe\n\ndef Coord1dPosEncoding(q_len, exponential=False, normalize=True):\n    cpe = (2 * (torch.linspace(0, 1, q_len).reshape(-1, 1)**(.5 if exponential else 1)) - 1)\n    if normalize:\n        cpe = cpe - cpe.mean()\n        cpe = cpe / (cpe.std() * 10)\n    return cpe\n\ndef positional_encoding(pe, learn_pe, q_len, d_model):\n    # Positional encoding\n    if pe == None:\n        W_pos = torch.empty((q_len, d_model)) # pe = None and learn_pe = False can be used to measure impact of pe\n        nn.init.uniform_(W_pos, -0.02, 0.02)\n        learn_pe = False\n    elif pe == 'zero':\n        W_pos = torch.empty((q_len, 1))\n        nn.init.uniform_(W_pos, -0.02, 0.02)\n    elif pe == 'zeros':\n        W_pos = torch.empty((q_len, d_model))\n        nn.init.uniform_(W_pos, -0.02, 0.02)\n    elif pe == 'normal' or pe == 'gauss':\n        W_pos = torch.zeros((q_len, 1))\n        torch.nn.init.normal_(W_pos, mean=0.0, std=0.1)\n    elif pe == 'uniform':\n        W_pos = torch.zeros((q_len, 1))\n        nn.init.uniform_(W_pos, a=0.0, b=0.1)\n    elif pe == 'lin1d': W_pos = 
Coord1dPosEncoding(q_len, exponential=False, normalize=True)\n    elif pe == 'exp1d': W_pos = Coord1dPosEncoding(q_len, exponential=True, normalize=True)\n    elif pe == 'lin2d': W_pos = Coord2dPosEncoding(q_len, d_model, exponential=False, normalize=True)\n    elif pe == 'exp2d': W_pos = Coord2dPosEncoding(q_len, d_model, exponential=True, normalize=True)\n    elif pe == 'sincos': W_pos = PositionalEncoding(q_len, d_model, normalize=True)\n    else: raise ValueError(f\"{pe} is not a valid pe (positional encoder. Available types: 'gauss'=='normal', \\\n        'zeros', 'zero', uniform', 'lin1d', 'exp1d', 'lin2d', 'exp2d', 'sincos', None.)\")\n    return nn.Parameter(W_pos, requires_grad=learn_pe)"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/layers/RevIN.py",
    "content": "# code from https://github.com/ts-kim/RevIN, with minor modifications\n\nimport torch\nimport torch.nn as nn\n\nclass RevIN(nn.Module):\n    def __init__(self, num_features: int, eps=1e-5, affine=True, subtract_last=False):\n        \"\"\"\n        :param num_features: the number of features or channels\n        :param eps: a value added for numerical stability\n        :param affine: if True, RevIN has learnable affine parameters\n        \"\"\"\n        super(RevIN, self).__init__()\n        self.num_features = num_features\n        self.eps = eps\n        self.affine = affine\n        self.subtract_last = subtract_last\n        if self.affine:\n            self._init_params()\n\n    def forward(self, x, mode:str):\n        if mode == 'norm':\n            self._get_statistics(x)\n            x = self._normalize(x)\n        elif mode == 'denorm':\n            x = self._denormalize(x)\n        else: raise NotImplementedError\n        return x\n\n    def _init_params(self):\n        # initialize RevIN params: (C,)\n        self.affine_weight = nn.Parameter(torch.ones(self.num_features))\n        self.affine_bias = nn.Parameter(torch.zeros(self.num_features))\n\n    def _get_statistics(self, x):\n        dim2reduce = tuple(range(1, x.ndim-1))\n        if self.subtract_last:\n            self.last = x[:,-1,:].unsqueeze(1)\n        else:\n            self.mean = torch.mean(x, dim=dim2reduce, keepdim=True).detach()\n        self.stdev = torch.sqrt(torch.var(x, dim=dim2reduce, keepdim=True, unbiased=False) + self.eps).detach()\n\n    def _normalize(self, x):\n        if self.subtract_last:\n            x = x - self.last\n        else:\n            x = x - self.mean\n        x = x / self.stdev\n        if self.affine:\n            x = x * self.affine_weight\n            x = x + self.affine_bias\n        return x\n\n    def _denormalize(self, x):\n        if self.affine:\n            x = x - self.affine_bias\n            x = x / (self.affine_weight + 
self.eps*self.eps)\n        x = x * self.stdev\n        if self.subtract_last:\n            x = x + self.last\n        else:\n            x = x + self.mean\n        return x\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/layers/SelfAttention_Family.py",
    "content": "import torch\nimport torch.nn as nn\nimport numpy as np\nfrom math import sqrt\nfrom Other_baselines.utils.masking import TriangularCausalMask, ProbMask\nfrom reformer_pytorch import LSHSelfAttention\nfrom einops import rearrange, repeat\n\n\nclass DSAttention(nn.Module):\n    '''De-stationary Attention'''\n\n    def __init__(self, mask_flag=True, factor=5, scale=None, attention_dropout=0.1, output_attention=False):\n        super(DSAttention, self).__init__()\n        self.scale = scale\n        self.mask_flag = mask_flag\n        self.output_attention = output_attention\n        self.dropout = nn.Dropout(attention_dropout)\n\n    def forward(self, queries, keys, values, attn_mask, tau=None, delta=None):\n        B, L, H, E = queries.shape\n        _, S, _, D = values.shape\n        scale = self.scale or 1. / sqrt(E)\n\n        tau = 1.0 if tau is None else tau.unsqueeze(\n            1).unsqueeze(1)  # B x 1 x 1 x 1\n        delta = 0.0 if delta is None else delta.unsqueeze(\n            1).unsqueeze(1)  # B x 1 x 1 x S\n\n        # De-stationary Attention, rescaling pre-softmax score with learned de-stationary factors\n        scores = torch.einsum(\"blhe,bshe->bhls\", queries, keys) * tau + delta\n\n        if self.mask_flag:\n            if attn_mask is None:\n                attn_mask = TriangularCausalMask(B, L, device=queries.device)\n\n            scores.masked_fill_(attn_mask.mask, -np.inf)\n\n        A = self.dropout(torch.softmax(scale * scores, dim=-1))\n        V = torch.einsum(\"bhls,bshd->blhd\", A, values)\n\n        if self.output_attention:\n            return V.contiguous(), A\n        else:\n            return V.contiguous(), None\n\n\nclass FullAttention(nn.Module):\n    def __init__(self, mask_flag=True, factor=5, scale=None, attention_dropout=0.1, output_attention=False):\n        super(FullAttention, self).__init__()\n        self.scale = scale\n        self.mask_flag = mask_flag\n        self.output_attention = 
output_attention\n        self.dropout = nn.Dropout(attention_dropout)\n\n    def forward(self, queries, keys, values, attn_mask, tau=None, delta=None):\n        B, L, H, E = queries.shape\n        _, S, _, D = values.shape\n        scale = self.scale or 1. / sqrt(E)\n\n        scores = torch.einsum(\"blhe,bshe->bhls\", queries, keys)\n\n        if self.mask_flag:\n            if attn_mask is None:\n                attn_mask = TriangularCausalMask(B, L, device=queries.device)\n\n            scores.masked_fill_(attn_mask.mask, -np.inf)\n\n        A = self.dropout(torch.softmax(scale * scores, dim=-1))\n        V = torch.einsum(\"bhls,bshd->blhd\", A, values)\n\n        if self.output_attention:\n            return V.contiguous(), A\n        else:\n            return V.contiguous(), None\n\n\nclass ProbAttention(nn.Module):\n    def __init__(self, mask_flag=True, factor=5, scale=None, attention_dropout=0.1, output_attention=False):\n        super(ProbAttention, self).__init__()\n        self.factor = factor\n        self.scale = scale\n        self.mask_flag = mask_flag\n        self.output_attention = output_attention\n        self.dropout = nn.Dropout(attention_dropout)\n\n    def _prob_QK(self, Q, K, sample_k, n_top):  # n_top: c*ln(L_q)\n        # Q [B, H, L, D]\n        B, H, L_K, E = K.shape\n        _, _, L_Q, _ = Q.shape\n\n        # calculate the sampled Q_K\n        K_expand = K.unsqueeze(-3).expand(B, H, L_Q, L_K, E)\n        # real U = U_part(factor*ln(L_k))*L_q\n        index_sample = torch.randint(L_K, (L_Q, sample_k))\n        K_sample = K_expand[:, :, torch.arange(\n            L_Q).unsqueeze(1), index_sample, :]\n        Q_K_sample = torch.matmul(\n            Q.unsqueeze(-2), K_sample.transpose(-2, -1)).squeeze()\n\n        # find the Top_k query with sparisty measurement\n        M = Q_K_sample.max(-1)[0] - torch.div(Q_K_sample.sum(-1), L_K)\n        M_top = M.topk(n_top, sorted=False)[1]\n\n        # use the reduced Q to calculate Q_K\n        
Q_reduce = Q[torch.arange(B)[:, None, None],\n                   torch.arange(H)[None, :, None],\n                   M_top, :]  # factor*ln(L_q)\n        Q_K = torch.matmul(Q_reduce, K.transpose(-2, -1))  # factor*ln(L_q)*L_k\n\n        return Q_K, M_top\n\n    def _get_initial_context(self, V, L_Q):\n        B, H, L_V, D = V.shape\n        if not self.mask_flag:\n            # V_sum = V.sum(dim=-2)\n            V_sum = V.mean(dim=-2)\n            contex = V_sum.unsqueeze(-2).expand(B, H,\n                                                L_Q, V_sum.shape[-1]).clone()\n        else:  # use mask\n            # requires that L_Q == L_V, i.e. for self-attention only\n            assert (L_Q == L_V)\n            contex = V.cumsum(dim=-2)\n        return contex\n\n    def _update_context(self, context_in, V, scores, index, L_Q, attn_mask):\n        B, H, L_V, D = V.shape\n\n        if self.mask_flag:\n            attn_mask = ProbMask(B, H, L_Q, index, scores, device=V.device)\n            scores.masked_fill_(attn_mask.mask, -np.inf)\n\n        attn = torch.softmax(scores, dim=-1)  # nn.Softmax(dim=-1)(scores)\n\n        context_in[torch.arange(B)[:, None, None],\n        torch.arange(H)[None, :, None],\n        index, :] = torch.matmul(attn, V).type_as(context_in)\n        if self.output_attention:\n            attns = (torch.ones([B, H, L_V, L_V]) /\n                     L_V).type_as(attn).to(attn.device)\n            attns[torch.arange(B)[:, None, None], torch.arange(H)[\n                                                  None, :, None], index, :] = attn\n            return context_in, attns\n        else:\n            return context_in, None\n\n    def forward(self, queries, keys, values, attn_mask, tau=None, delta=None):\n        B, L_Q, H, D = queries.shape\n        _, L_K, _, _ = keys.shape\n\n        queries = queries.transpose(2, 1)\n        keys = keys.transpose(2, 1)\n        values = values.transpose(2, 1)\n\n        U_part = self.factor * \\\n                 
np.ceil(np.log(L_K)).astype('int').item()  # c*ln(L_k)\n        u = self.factor * \\\n            np.ceil(np.log(L_Q)).astype('int').item()  # c*ln(L_q)\n\n        U_part = U_part if U_part < L_K else L_K\n        u = u if u < L_Q else L_Q\n\n        scores_top, index = self._prob_QK(\n            queries, keys, sample_k=U_part, n_top=u)\n\n        # add scale factor\n        scale = self.scale or 1. / sqrt(D)\n        if scale is not None:\n            scores_top = scores_top * scale\n        # get the context\n        context = self._get_initial_context(values, L_Q)\n        # update the context with selected top_k queries\n        context, attn = self._update_context(\n            context, values, scores_top, index, L_Q, attn_mask)\n\n        return context.contiguous(), attn\n\n\nclass AttentionLayer(nn.Module):\n    def __init__(self, attention, d_model, n_heads, d_keys=None,\n                 d_values=None):\n        super(AttentionLayer, self).__init__()\n\n        d_keys = d_keys or (d_model // n_heads)\n        d_values = d_values or (d_model // n_heads)\n\n        self.inner_attention = attention\n        self.query_projection = nn.Linear(d_model, d_keys * n_heads)\n        self.key_projection = nn.Linear(d_model, d_keys * n_heads)\n        self.value_projection = nn.Linear(d_model, d_values * n_heads)\n        self.out_projection = nn.Linear(d_values * n_heads, d_model)\n        self.n_heads = n_heads\n\n    def forward(self, queries, keys, values, attn_mask, tau=None, delta=None):\n        B, L, _ = queries.shape\n        _, S, _ = keys.shape\n        H = self.n_heads\n\n        queries = self.query_projection(queries).view(B, L, H, -1)\n        keys = self.key_projection(keys).view(B, S, H, -1)\n        values = self.value_projection(values).view(B, S, H, -1)\n\n        out, attn = self.inner_attention(\n            queries,\n            keys,\n            values,\n            attn_mask,\n            tau=tau,\n            delta=delta\n        )\n       
 out = out.view(B, L, -1)\n\n        return self.out_projection(out), attn\n\n\nclass ReformerLayer(nn.Module):\n    def __init__(self, attention, d_model, n_heads, d_keys=None,\n                 d_values=None, causal=False, bucket_size=4, n_hashes=4):\n        super().__init__()\n        self.bucket_size = bucket_size\n        self.attn = LSHSelfAttention(\n            dim=d_model,\n            heads=n_heads,\n            bucket_size=bucket_size,\n            n_hashes=n_hashes,\n            causal=causal\n        )\n\n    def fit_length(self, queries):\n        # inside reformer: assert N % (bucket_size * 2) == 0\n        B, N, C = queries.shape\n        if N % (self.bucket_size * 2) == 0:\n            return queries\n        else:\n            # fill the time series\n            fill_len = (self.bucket_size * 2) - (N % (self.bucket_size * 2))\n            return torch.cat([queries, torch.zeros([B, fill_len, C]).to(queries.device)], dim=1)\n\n    def forward(self, queries, keys, values, attn_mask, tau, delta):\n        # in Reformer: defalut queries=keys\n        B, N, C = queries.shape\n        queries = self.attn(self.fit_length(queries))[:, :N, :]\n        return queries, None\n\n\nclass TwoStageAttentionLayer(nn.Module):\n    '''\n    The Two Stage Attention (TSA) Layer\n    input/output shape: [batch_size, Data_dim(D), Seg_num(L), d_model]\n    '''\n\n    def __init__(self, configs,\n                 seg_num, factor, d_model, n_heads, d_ff=None, dropout=0.1):\n        super(TwoStageAttentionLayer, self).__init__()\n        d_ff = d_ff or 4 * d_model\n        self.time_attention = AttentionLayer(FullAttention(False, configs.factor, attention_dropout=configs.dropout,\n                                                           output_attention=configs.output_attention), d_model, n_heads)\n        self.dim_sender = AttentionLayer(FullAttention(False, configs.factor, attention_dropout=configs.dropout,\n                                                       
output_attention=configs.output_attention), d_model, n_heads)\n        self.dim_receiver = AttentionLayer(FullAttention(False, configs.factor, attention_dropout=configs.dropout,\n                                                         output_attention=configs.output_attention), d_model, n_heads)\n        self.router = nn.Parameter(torch.randn(seg_num, factor, d_model))\n\n        self.dropout = nn.Dropout(dropout)\n\n        self.norm1 = nn.LayerNorm(d_model)\n        self.norm2 = nn.LayerNorm(d_model)\n        self.norm3 = nn.LayerNorm(d_model)\n        self.norm4 = nn.LayerNorm(d_model)\n\n        self.MLP1 = nn.Sequential(nn.Linear(d_model, d_ff),\n                                  nn.GELU(),\n                                  nn.Linear(d_ff, d_model))\n        self.MLP2 = nn.Sequential(nn.Linear(d_model, d_ff),\n                                  nn.GELU(),\n                                  nn.Linear(d_ff, d_model))\n\n    def forward(self, x, attn_mask=None, tau=None, delta=None):\n        # Cross Time Stage: Directly apply MSA to each dimension\n        batch = x.shape[0]\n        time_in = rearrange(x, 'b ts_d seg_num d_model -> (b ts_d) seg_num d_model')\n        time_enc, attn = self.time_attention(\n            time_in, time_in, time_in, attn_mask=None, tau=None, delta=None\n        )\n        dim_in = time_in + self.dropout(time_enc)\n        dim_in = self.norm1(dim_in)\n        dim_in = dim_in + self.dropout(self.MLP1(dim_in))\n        dim_in = self.norm2(dim_in)\n\n        # Cross Dimension Stage: use a small set of learnable vectors to aggregate and distribute messages to build the D-to-D connection\n        dim_send = rearrange(dim_in, '(b ts_d) seg_num d_model -> (b seg_num) ts_d d_model', b=batch)\n        batch_router = repeat(self.router, 'seg_num factor d_model -> (repeat seg_num) factor d_model', repeat=batch)\n        dim_buffer, attn = self.dim_sender(batch_router, dim_send, dim_send, attn_mask=None, tau=None, delta=None)\n        
dim_receive, attn = self.dim_receiver(dim_send, dim_buffer, dim_buffer, attn_mask=None, tau=None, delta=None)\n        dim_enc = dim_send + self.dropout(dim_receive)\n        dim_enc = self.norm3(dim_enc)\n        dim_enc = dim_enc + self.dropout(self.MLP2(dim_enc))\n        dim_enc = self.norm4(dim_enc)\n\n        final_out = rearrange(dim_enc, '(b seg_num) ts_d d_model -> b ts_d seg_num d_model', b=batch)\n\n        return final_out\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/layers/Transformer_EncDec.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass ConvLayer(nn.Module):\n    def __init__(self, c_in):\n        super(ConvLayer, self).__init__()\n        self.downConv = nn.Conv1d(in_channels=c_in,\n                                  out_channels=c_in,\n                                  kernel_size=3,\n                                  padding=2,\n                                  padding_mode='circular')\n        self.norm = nn.BatchNorm1d(c_in)\n        self.activation = nn.ELU()\n        self.maxPool = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)\n\n    def forward(self, x):\n        x = self.downConv(x.permute(0, 2, 1))\n        x = self.norm(x)\n        x = self.activation(x)\n        x = self.maxPool(x)\n        x = x.transpose(1, 2)\n        return x\n\n\nclass EncoderLayer(nn.Module):\n    def __init__(self, attention, d_model, d_ff=None, dropout=0.1, activation=\"relu\"):\n        super(EncoderLayer, self).__init__()\n        d_ff = d_ff or 4 * d_model\n        self.attention = attention\n        self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1)\n        self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1)\n        self.norm1 = nn.LayerNorm(d_model)\n        self.norm2 = nn.LayerNorm(d_model)\n        self.dropout = nn.Dropout(dropout)\n        self.activation = F.relu if activation == \"relu\" else F.gelu\n\n    def forward(self, x, attn_mask=None, tau=None, delta=None):\n        new_x, attn = self.attention(\n            x, x, x,\n            attn_mask=attn_mask,\n            tau=tau, delta=delta\n        )\n        x = x + self.dropout(new_x)\n\n        y = x = self.norm1(x)\n        y = self.dropout(self.activation(self.conv1(y.transpose(-1, 1))))\n        y = self.dropout(self.conv2(y).transpose(-1, 1))\n\n        return self.norm2(x + y), attn\n\n\nclass Encoder(nn.Module):\n    def __init__(self, attn_layers, conv_layers=None, norm_layer=None):\n   
     super(Encoder, self).__init__()\n        self.attn_layers = nn.ModuleList(attn_layers)\n        self.conv_layers = nn.ModuleList(conv_layers) if conv_layers is not None else None\n        self.norm = norm_layer\n\n    def forward(self, x, attn_mask=None, tau=None, delta=None):\n        # x [B, L, D]\n        attns = []\n        if self.conv_layers is not None:\n            for i, (attn_layer, conv_layer) in enumerate(zip(self.attn_layers, self.conv_layers)):\n                delta = delta if i == 0 else None\n                x, attn = attn_layer(x, attn_mask=attn_mask, tau=tau, delta=delta)\n                x = conv_layer(x)\n                attns.append(attn)\n            x, attn = self.attn_layers[-1](x, tau=tau, delta=None)\n            attns.append(attn)\n        else:\n            for attn_layer in self.attn_layers:\n                x, attn = attn_layer(x, attn_mask=attn_mask, tau=tau, delta=delta)\n                attns.append(attn)\n\n        if self.norm is not None:\n            x = self.norm(x)\n\n        return x, attns\n\n\nclass DecoderLayer(nn.Module):\n    def __init__(self, self_attention, cross_attention, d_model, d_ff=None,\n                 dropout=0.1, activation=\"relu\"):\n        super(DecoderLayer, self).__init__()\n        d_ff = d_ff or 4 * d_model\n        self.self_attention = self_attention\n        self.cross_attention = cross_attention\n        self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1)\n        self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1)\n        self.norm1 = nn.LayerNorm(d_model)\n        self.norm2 = nn.LayerNorm(d_model)\n        self.norm3 = nn.LayerNorm(d_model)\n        self.dropout = nn.Dropout(dropout)\n        self.activation = F.relu if activation == \"relu\" else F.gelu\n\n    def forward(self, x, cross, x_mask=None, cross_mask=None, tau=None, delta=None):\n        x = x + self.dropout(self.self_attention(\n            x, x, x,\n            
attn_mask=x_mask,\n            tau=tau, delta=None\n        )[0])\n        x = self.norm1(x)\n\n        x = x + self.dropout(self.cross_attention(\n            x, cross, cross,\n            attn_mask=cross_mask,\n            tau=tau, delta=delta\n        )[0])\n\n        y = x = self.norm2(x)\n        y = self.dropout(self.activation(self.conv1(y.transpose(-1, 1))))\n        y = self.dropout(self.conv2(y).transpose(-1, 1))\n\n        return self.norm3(x + y)\n\n\nclass Decoder(nn.Module):\n    def __init__(self, layers, norm_layer=None, projection=None):\n        super(Decoder, self).__init__()\n        self.layers = nn.ModuleList(layers)\n        self.norm = norm_layer\n        self.projection = projection\n\n    def forward(self, x, cross, x_mask=None, cross_mask=None, tau=None, delta=None):\n        for layer in self.layers:\n            x = layer(x, cross, x_mask=x_mask, cross_mask=cross_mask, tau=tau, delta=delta)\n\n        if self.norm is not None:\n            x = self.norm(x)\n\n        if self.projection is not None:\n            x = self.projection(x)\n        return x\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/layers/__init__.py",
    "content": ""
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/models/Autoformer.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom Other_baselines.layers.Embed import DataEmbedding, DataEmbedding_wo_pos\nfrom Other_baselines.layers.AutoCorrelation import AutoCorrelation, AutoCorrelationLayer\nfrom Other_baselines.layers.Autoformer_EncDec import Encoder, Decoder, EncoderLayer, DecoderLayer, my_Layernorm, series_decomp\nimport math\nimport numpy as np\n\n\nclass Model(nn.Module):\n    \"\"\"\n    Autoformer is the first method to achieve the series-wise connection,\n    with inherent O(LlogL) complexity\n    Paper link: https://openreview.net/pdf?id=I55UqU-M11y\n    \"\"\"\n\n    def __init__(self, configs):\n        super(Model, self).__init__()\n        self.task_name = configs.task_name\n        self.seq_len = configs.seq_len\n        self.label_len = configs.label_len\n        self.pred_len = configs.pred_len\n        self.output_attention = configs.output_attention\n\n        # Decomp\n        kernel_size = configs.moving_avg\n        self.decomp = series_decomp(kernel_size)\n\n        # Embedding\n        self.enc_embedding = DataEmbedding_wo_pos(configs.enc_in, configs.d_model, configs.embed, configs.freq,\n                                                  configs.dropout)\n        # Encoder\n        self.encoder = Encoder(\n            [\n                EncoderLayer(\n                    AutoCorrelationLayer(\n                        AutoCorrelation(False, configs.factor, attention_dropout=configs.dropout,\n                                        output_attention=configs.output_attention),\n                        configs.d_model, configs.n_heads),\n                    configs.d_model,\n                    configs.d_ff,\n                    moving_avg=configs.moving_avg,\n                    dropout=configs.dropout,\n                    activation=configs.activation\n                ) for l in range(configs.e_layers)\n            ],\n            norm_layer=my_Layernorm(configs.d_model)\n        
)\n        # Decoder\n        if self.task_name == 'long_term_forecast' or self.task_name == 'short_term_forecast':\n            self.dec_embedding = DataEmbedding_wo_pos(configs.dec_in, configs.d_model, configs.embed, configs.freq,\n                                                      configs.dropout)\n            self.decoder = Decoder(\n                [\n                    DecoderLayer(\n                        AutoCorrelationLayer(\n                            AutoCorrelation(True, configs.factor, attention_dropout=configs.dropout,\n                                            output_attention=False),\n                            configs.d_model, configs.n_heads),\n                        AutoCorrelationLayer(\n                            AutoCorrelation(False, configs.factor, attention_dropout=configs.dropout,\n                                            output_attention=False),\n                            configs.d_model, configs.n_heads),\n                        configs.d_model,\n                        configs.c_out,\n                        configs.d_ff,\n                        moving_avg=configs.moving_avg,\n                        dropout=configs.dropout,\n                        activation=configs.activation,\n                    )\n                    for l in range(configs.d_layers)\n                ],\n                norm_layer=my_Layernorm(configs.d_model),\n                projection=nn.Linear(configs.d_model, configs.c_out, bias=True)\n            )\n        if self.task_name == 'imputation':\n            self.projection = nn.Linear(\n                configs.d_model, configs.c_out, bias=True)\n        if self.task_name == 'anomaly_detection':\n            self.projection = nn.Linear(\n                configs.d_model, configs.c_out, bias=True)\n        if self.task_name == 'classification':\n            self.act = F.gelu\n            self.dropout = nn.Dropout(configs.dropout)\n            self.projection = nn.Linear(\n                
configs.d_model * configs.seq_len, configs.num_class)\n\n    def forecast(self, x_enc, x_mark_enc, x_dec, x_mark_dec):\n        # decomp init\n        mean = torch.mean(x_enc, dim=1).unsqueeze(\n            1).repeat(1, self.pred_len, 1)\n        zeros = torch.zeros([x_dec.shape[0], self.pred_len,\n                             x_dec.shape[2]], device=x_enc.device)\n        seasonal_init, trend_init = self.decomp(x_enc)\n        # decoder input\n        trend_init = torch.cat(\n            [trend_init[:, -self.label_len:, :], mean], dim=1)\n        seasonal_init = torch.cat(\n            [seasonal_init[:, -self.label_len:, :], zeros], dim=1)\n        # enc\n        enc_out = self.enc_embedding(x_enc, x_mark_enc)\n        enc_out, attns = self.encoder(enc_out, attn_mask=None)\n        # dec\n        dec_out = self.dec_embedding(seasonal_init, x_mark_dec)\n        seasonal_part, trend_part = self.decoder(dec_out, enc_out, x_mask=None, cross_mask=None,\n                                                 trend=trend_init)\n        # final\n        dec_out = trend_part + seasonal_part\n        return dec_out\n\n    def imputation(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask):\n        # enc\n        enc_out = self.enc_embedding(x_enc, x_mark_enc)\n        enc_out, attns = self.encoder(enc_out, attn_mask=None)\n        # final\n        dec_out = self.projection(enc_out)\n        return dec_out\n\n    def anomaly_detection(self, x_enc):\n        # enc\n        enc_out = self.enc_embedding(x_enc, None)\n        enc_out, attns = self.encoder(enc_out, attn_mask=None)\n        # final\n        dec_out = self.projection(enc_out)\n        return dec_out\n\n    def classification(self, x_enc, x_mark_enc):\n        # enc\n        enc_out = self.enc_embedding(x_enc, None)\n        enc_out, attns = self.encoder(enc_out, attn_mask=None)\n\n        # Output\n        # the output transformer encoder/decoder embeddings don't include non-linearity\n        output = self.act(enc_out)\n  
      output = self.dropout(output)\n        # zero-out padding embeddings\n        output = output * x_mark_enc.unsqueeze(-1)\n        # (batch_size, seq_length * d_model)\n        output = output.reshape(output.shape[0], -1)\n        output = self.projection(output)  # (batch_size, num_classes)\n        return output\n\n    def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask=None):\n        if self.task_name == 'long_term_forecast' or self.task_name == 'short_term_forecast':\n            dec_out = self.forecast(x_enc, x_mark_enc, x_dec, x_mark_dec)\n            return dec_out[:, -self.pred_len:, :]  # [B, L, D]\n        if self.task_name == 'imputation':\n            dec_out = self.imputation(\n                x_enc, x_mark_enc, x_dec, x_mark_dec, mask)\n            return dec_out  # [B, L, D]\n        if self.task_name == 'anomaly_detection':\n            dec_out = self.anomaly_detection(x_enc)\n            return dec_out  # [B, L, D]\n        if self.task_name == 'classification':\n            dec_out = self.classification(x_enc, x_mark_enc)\n            return dec_out  # [B, N]\n        return None\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/models/DLinear.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom Other_baselines.layers.Autoformer_EncDec import series_decomp\n\n\nclass Model(nn.Module):\n    \"\"\"\n    Paper link: https://arxiv.org/pdf/2205.13504.pdf\n    \"\"\"\n\n    def __init__(self, configs, individual=False):\n        \"\"\"\n        individual: Bool, whether shared model among different variates.\n        \"\"\"\n        super(Model, self).__init__()\n        self.task_name = configs.task_name\n        self.seq_len = configs.seq_len\n        if self.task_name == 'classification' or self.task_name == 'anomaly_detection' or self.task_name == 'imputation':\n            self.pred_len = configs.seq_len\n        else:\n            self.pred_len = configs.pred_len\n        # Series decomposition block from Autoformer\n        self.decompsition = series_decomp(configs.moving_avg)\n        self.individual = individual\n        self.channels = configs.enc_in\n\n        if self.individual:\n            self.Linear_Seasonal = nn.ModuleList()\n            self.Linear_Trend = nn.ModuleList()\n\n            for i in range(self.channels):\n                self.Linear_Seasonal.append(\n                    nn.Linear(self.seq_len, self.pred_len))\n                self.Linear_Trend.append(\n                    nn.Linear(self.seq_len, self.pred_len))\n\n                self.Linear_Seasonal[i].weight = nn.Parameter(\n                    (1 / self.seq_len) * torch.ones([self.pred_len, self.seq_len]))\n                self.Linear_Trend[i].weight = nn.Parameter(\n                    (1 / self.seq_len) * torch.ones([self.pred_len, self.seq_len]))\n        else:\n            self.Linear_Seasonal = nn.Linear(self.seq_len, self.pred_len)\n            self.Linear_Trend = nn.Linear(self.seq_len, self.pred_len)\n\n            self.Linear_Seasonal.weight = nn.Parameter(\n                (1 / self.seq_len) * torch.ones([self.pred_len, self.seq_len]))\n            self.Linear_Trend.weight = 
nn.Parameter(\n                (1 / self.seq_len) * torch.ones([self.pred_len, self.seq_len]))\n\n        if self.task_name == 'classification':\n            self.projection = nn.Linear(\n                configs.enc_in * configs.seq_len, configs.num_class)\n\n    def encoder(self, x):\n        seasonal_init, trend_init = self.decompsition(x)\n        seasonal_init, trend_init = seasonal_init.permute(\n            0, 2, 1), trend_init.permute(0, 2, 1)\n        if self.individual:\n            seasonal_output = torch.zeros([seasonal_init.size(0), seasonal_init.size(1), self.pred_len],\n                                          dtype=seasonal_init.dtype).to(seasonal_init.device)\n            trend_output = torch.zeros([trend_init.size(0), trend_init.size(1), self.pred_len],\n                                       dtype=trend_init.dtype).to(trend_init.device)\n            for i in range(self.channels):\n                seasonal_output[:, i, :] = self.Linear_Seasonal[i](\n                    seasonal_init[:, i, :])\n                trend_output[:, i, :] = self.Linear_Trend[i](\n                    trend_init[:, i, :])\n        else:\n            seasonal_output = self.Linear_Seasonal(seasonal_init)\n            trend_output = self.Linear_Trend(trend_init)\n        x = seasonal_output + trend_output\n        return x.permute(0, 2, 1)\n\n    def forecast(self, x_enc):\n        # Encoder\n        return self.encoder(x_enc)\n\n    def imputation(self, x_enc):\n        # Encoder\n        return self.encoder(x_enc)\n\n    def anomaly_detection(self, x_enc):\n        # Encoder\n        return self.encoder(x_enc)\n\n    def classification(self, x_enc):\n        # Encoder\n        enc_out = self.encoder(x_enc)\n        # Output\n        # (batch_size, seq_length * d_model)\n        output = enc_out.reshape(enc_out.shape[0], -1)\n        # (batch_size, num_classes)\n        output = self.projection(output)\n        return output\n\n    def forward(self, x_enc, x_mark_enc, x_dec, 
x_mark_dec, mask=None):\n        if self.task_name == 'long_term_forecast' or self.task_name == 'short_term_forecast':\n            print(\"x_enc.shape = \", x_enc.shape)\n            dec_out = self.forecast(x_enc)\n            print(\"dec_out.shape = \", dec_out.shape)\n            print(\"dec_out[:, -self.pred_len:, :].shape = \", dec_out[:, -self.pred_len:, :].shape)\n            return dec_out[:, -self.pred_len:, :]  # [B, L, D]\n        if self.task_name == 'imputation':\n            dec_out = self.imputation(x_enc)\n            return dec_out  # [B, L, D]\n        if self.task_name == 'anomaly_detection':\n            dec_out = self.anomaly_detection(x_enc)\n            return dec_out  # [B, L, D]\n        if self.task_name == 'classification':\n            dec_out = self.classification(x_enc)\n            return dec_out  # [B, N]\n        return None\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/models/GPT4TS.py",
    "content": "import torch\nimport torch.nn as nn\nimport os\n\nfrom transformers.models.gpt2.modeling_gpt2 import GPT2Model\nfrom einops import rearrange\nfrom transformers.models.gpt2.configuration_gpt2 import GPT2Config\n\n\nclass GPT4TS(nn.Module):\n\n    def __init__(self, configs, device):\n        super(GPT4TS, self).__init__()\n        self.is_gpt = configs.is_gpt\n        self.patch_size = configs.patch_size\n        self.pretrain = configs.pretrain\n        self.stride = configs.stride\n        self.patch_num = (configs.seq_len - self.patch_size) // self.stride + 1\n\n        self.padding_patch_layer = nn.ReplicationPad1d((0, self.stride))\n        self.patch_num += 1\n\n        if configs.is_gpt:\n            if configs.pretrain:\n                # self.gpt2 = GPT2Model.from_pretrained('gpt2', output_attentions=True,\n                #                                       output_hidden_states=True)  # loads a pretrained GPT-2 base model\n\n                if not os.path.exists(\"/dev_data/lz/gpt2\"):\n                    self.gpt2 = GPT2Model.from_pretrained('/SSD/lz/gpt2', output_attentions=True,\n                                                          output_hidden_states=True)\n                else:\n                    self.gpt2 = GPT2Model.from_pretrained('/dev_data/lz/gpt2', output_attentions=True,\n                                                          output_hidden_states=True)\n\n\n            else:\n                print(\"------------------no pretrain------------------\")\n                self.gpt2 = GPT2Model(GPT2Config())\n            self.gpt2.h = self.gpt2.h[:configs.gpt_layers]\n            # print(\"gpt2 = {}\".format(self.gpt2))\n\n        self.in_layer = nn.Linear(configs.patch_size, configs.d_model)\n        self.out_layer = nn.Linear(configs.d_model * self.patch_num, configs.pred_len)\n\n        if configs.freeze and configs.pretrain:\n            for i, (name, param) in enumerate(self.gpt2.named_parameters()):\n              
  if 'ln' in name or 'wpe' in name:\n                    param.requires_grad = True\n                else:\n                    param.requires_grad = False\n\n        for layer in (self.gpt2, self.in_layer, self.out_layer):\n            layer.to(device=device)\n            layer.train()\n\n        self.cnt = 0\n\n    def forward(self, x, itr):\n        B, L, M = x.shape\n\n        means = x.mean(1, keepdim=True).detach()\n        x = x - means\n        stdev = torch.sqrt(torch.var(x, dim=1, keepdim=True, unbiased=False) + 1e-5).detach()\n        x /= stdev\n\n        x = rearrange(x, 'b l m -> b m l')\n\n        x = self.padding_patch_layer(x)\n        x = x.unfold(dimension=-1, size=self.patch_size, step=self.stride)\n        x = rearrange(x, 'b m n p -> (b m) n p')\n\n        outputs = self.in_layer(x)\n        if self.is_gpt:\n            outputs = self.gpt2(inputs_embeds=outputs).last_hidden_state\n\n        outputs = self.out_layer(outputs.reshape(B * M, -1))\n        outputs = rearrange(outputs, '(b m) l -> b l m', b=B)\n\n        outputs = outputs * stdev\n        outputs = outputs + means\n\n        return outputs\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/models/Informer.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom Other_baselines.layers.Transformer_EncDec import Decoder, DecoderLayer, Encoder, EncoderLayer, ConvLayer\nfrom Other_baselines.layers.SelfAttention_Family import ProbAttention, AttentionLayer\nfrom Other_baselines.layers.Embed import DataEmbedding\n\n\nclass Model(nn.Module):\n    \"\"\"\n    Informer with Propspare attention in O(LlogL) complexity\n    Paper link: https://ojs.aaai.org/index.php/AAAI/article/view/17325/17132\n    \"\"\"\n\n    def __init__(self, configs):\n        super(Model, self).__init__()\n        self.task_name = configs.task_name\n        self.pred_len = configs.pred_len\n        self.label_len = configs.label_len\n\n        # Embedding\n        self.enc_embedding = DataEmbedding(configs.enc_in, configs.d_model, configs.embed, configs.freq,\n                                           configs.dropout)\n        self.dec_embedding = DataEmbedding(configs.dec_in, configs.d_model, configs.embed, configs.freq,\n                                           configs.dropout)\n\n        # Encoder\n        self.encoder = Encoder(\n            [\n                EncoderLayer(\n                    AttentionLayer(\n                        ProbAttention(False, configs.factor, attention_dropout=configs.dropout,\n                                      output_attention=configs.output_attention),\n                        configs.d_model, configs.n_heads),\n                    configs.d_model,\n                    configs.d_ff,\n                    dropout=configs.dropout,\n                    activation=configs.activation\n                ) for l in range(configs.e_layers)\n            ],\n            [\n                ConvLayer(\n                    configs.d_model\n                ) for l in range(configs.e_layers - 1)\n            ] if configs.distil and ('forecast' in configs.task_name) else None,\n            norm_layer=torch.nn.LayerNorm(configs.d_model)\n        )\n 
       # Decoder\n        self.decoder = Decoder(\n            [\n                DecoderLayer(\n                    AttentionLayer(\n                        ProbAttention(True, configs.factor, attention_dropout=configs.dropout, output_attention=False),\n                        configs.d_model, configs.n_heads),\n                    AttentionLayer(\n                        ProbAttention(False, configs.factor, attention_dropout=configs.dropout, output_attention=False),\n                        configs.d_model, configs.n_heads),\n                    configs.d_model,\n                    configs.d_ff,\n                    dropout=configs.dropout,\n                    activation=configs.activation,\n                )\n                for l in range(configs.d_layers)\n            ],\n            norm_layer=torch.nn.LayerNorm(configs.d_model),\n            projection=nn.Linear(configs.d_model, configs.c_out, bias=True)\n        )\n        if self.task_name == 'imputation':\n            self.projection = nn.Linear(configs.d_model, configs.c_out, bias=True)\n        if self.task_name == 'anomaly_detection':\n            self.projection = nn.Linear(configs.d_model, configs.c_out, bias=True)\n        if self.task_name == 'classification':\n            self.act = F.gelu\n            self.dropout = nn.Dropout(configs.dropout)\n            self.projection = nn.Linear(configs.d_model * configs.seq_len, configs.num_class)\n\n    def long_forecast(self, x_enc, x_mark_enc, x_dec, x_mark_dec):\n        enc_out = self.enc_embedding(x_enc, x_mark_enc)\n        dec_out = self.dec_embedding(x_dec, x_mark_dec)\n        enc_out, attns = self.encoder(enc_out, attn_mask=None)\n\n        dec_out = self.decoder(dec_out, enc_out, x_mask=None, cross_mask=None)\n\n        return dec_out  # [B, L, D]\n\n    def short_forecast(self, x_enc, x_mark_enc, x_dec, x_mark_dec):\n        # Normalization\n        mean_enc = x_enc.mean(1, keepdim=True).detach()  # B x 1 x E\n        x_enc = x_enc - 
mean_enc\n        std_enc = torch.sqrt(torch.var(x_enc, dim=1, keepdim=True, unbiased=False) + 1e-5).detach()  # B x 1 x E\n        x_enc = x_enc / std_enc\n\n        enc_out = self.enc_embedding(x_enc, x_mark_enc)\n        dec_out = self.dec_embedding(x_dec, x_mark_dec)\n        enc_out, attns = self.encoder(enc_out, attn_mask=None)\n\n        dec_out = self.decoder(dec_out, enc_out, x_mask=None, cross_mask=None)\n\n        dec_out = dec_out * std_enc + mean_enc\n        return dec_out  # [B, L, D]\n\n    def imputation(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask):\n        # enc\n        enc_out = self.enc_embedding(x_enc, x_mark_enc)\n        enc_out, attns = self.encoder(enc_out, attn_mask=None)\n        # final\n        dec_out = self.projection(enc_out)\n        return dec_out\n\n    def anomaly_detection(self, x_enc):\n        # enc\n        enc_out = self.enc_embedding(x_enc, None)\n        enc_out, attns = self.encoder(enc_out, attn_mask=None)\n        # final\n        dec_out = self.projection(enc_out)\n        return dec_out\n\n    def classification(self, x_enc, x_mark_enc):\n        # enc\n        enc_out = self.enc_embedding(x_enc, None)\n        enc_out, attns = self.encoder(enc_out, attn_mask=None)\n\n        # Output\n        output = self.act(enc_out)  # the output transformer encoder/decoder embeddings don't include non-linearity\n        output = self.dropout(output)\n        output = output * x_mark_enc.unsqueeze(-1)  # zero-out padding embeddings\n        output = output.reshape(output.shape[0], -1)  # (batch_size, seq_length * d_model)\n        output = self.projection(output)  # (batch_size, num_classes)\n        return output\n\n    def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask=None):\n        if self.task_name == 'long_term_forecast':\n            dec_out = self.long_forecast(x_enc, x_mark_enc, x_dec, x_mark_dec)\n            return dec_out[:, -self.pred_len:, :]  # [B, L, D]\n        if self.task_name == 
'short_term_forecast':\n            dec_out = self.short_forecast(x_enc, x_mark_enc, x_dec, x_mark_dec)\n            return dec_out[:, -self.pred_len:, :]  # [B, L, D]\n        if self.task_name == 'imputation':\n            dec_out = self.imputation(x_enc, x_mark_enc, x_dec, x_mark_dec, mask)\n            return dec_out  # [B, L, D]\n        if self.task_name == 'anomaly_detection':\n            dec_out = self.anomaly_detection(x_enc)\n            return dec_out  # [B, L, D]\n        if self.task_name == 'classification':\n            dec_out = self.classification(x_enc, x_mark_enc)\n            return dec_out  # [B, N]\n        return None\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/models/LogTrans.py",
    "content": "\"\"\"\nThis code is based on huggingface,\nhttps://github.com/huggingface/pytorch-openai-transformer-lm/blob/master/model_pytorch.py\n\nMIT License\n\nCopyright (c) 2018 OpenAI\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OFS CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n\"\"\"\n# Arxiv Link https://arxiv.org/pdf/1907.00235.pdf\n\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport math\nimport copy\nfrom torch.nn.parameter import Parameter\nfrom torch.autograd import Function\n\n\ndef _make_ix_like(X, dim):\n    d = X.size(dim)\n    rho = torch.arange(1, d + 1, device=X.device, dtype=X.dtype)\n    view = [1] * X.dim()\n    view[0] = -1\n    return rho.view(view).transpose(0, dim)\n\n\ndef _roll_last(X, dim):\n    if dim == -1:\n        return X\n    elif dim < 0:\n        dim = X.dim() - dim\n\n    perm = [i for i in range(X.dim()) if i != dim] + [dim]\n    return X.permute(perm)\n\n\n\ndef _entmax_threshold_and_support(X, dim=-1, k=None):\n    \"\"\"Core computation for 1.5-entmax: optimal 
threshold and support size.\n    Parameters\n    ----------\n    X : torch.Tensor\n        The input tensor to compute thresholds over.\n    dim : int\n        The dimension along which to apply 1.5-entmax.\n    k : int or None\n        number of largest elements to partial-sort over. For optimal\n        performance, should be slightly bigger than the expected number of\n        nonzeros in the solution. If the solution is more than k-sparse,\n        this function is recursively called with a 2*k schedule.\n        If `None`, full sorting is performed from the beginning.\n    Returns\n    -------\n    tau : torch.Tensor like `X`, with all but the `dim` dimension intact\n        the threshold value for each vector\n    support_size : torch LongTensor, shape like `tau`\n        the number of nonzeros in each vector.\n    \"\"\"\n\n    if k is None or k >= X.shape[dim]:  # do full sort\n        Xsrt, _ = torch.sort(X, dim=dim, descending=True)\n    else:\n        Xsrt, _ = torch.topk(X, k=k, dim=dim)\n\n    rho = _make_ix_like(Xsrt, dim)\n    mean = Xsrt.cumsum(dim) / rho\n    mean_sq = (Xsrt ** 2).cumsum(dim) / rho\n    ss = rho * (mean_sq - mean ** 2)\n    delta = (1 - ss) / rho\n\n    # NOTE this is not exactly the same as in reference algo\n    # Fortunately it seems the clamped values never wrongly\n    # get selected by tau <= sorted_z. 
Prove this!\n    delta_nz = torch.clamp(delta, 0)\n    tau = mean - torch.sqrt(delta_nz)\n\n    support_size = (tau <= Xsrt).sum(dim).unsqueeze(dim)\n    tau_star = tau.gather(dim, support_size - 1)\n\n    if k is not None and k < X.shape[dim]:\n        unsolved = (support_size == k).squeeze(dim)\n\n        if torch.any(unsolved):\n            X_ = _roll_last(X, dim)[unsolved]\n            tau_, ss_ = _entmax_threshold_and_support(X_, dim=-1, k=2 * k)\n            _roll_last(tau_star, dim)[unsolved] = tau_\n            _roll_last(support_size, dim)[unsolved] = ss_\n\n    return tau_star, support_size\n\n\nclass Entmax15Function(Function):\n    @classmethod\n    def forward(cls, ctx, X: torch.Tensor, dim=0, k=None):\n        ctx.dim = dim\n\n        max_val, _ = X.max(dim=dim, keepdim=True)\n        X = X - max_val  # same numerical stability trick as for softmax\n        X = X / 2  # divide by 2 to solve actual Entmax\n\n        tau_star, _ = _entmax_threshold_and_support(X, dim=dim, k=k)\n\n        Y = torch.clamp(X - tau_star, min=0) ** 2\n        ctx.save_for_backward(Y)\n        return Y\n\n    @classmethod\n    def backward(cls, ctx, dY):\n        Y, = ctx.saved_tensors\n        gppr = Y.sqrt()  # = 1 / g'' (Y)\n        dX = dY * gppr\n        q = dX.sum(ctx.dim) / gppr.sum(ctx.dim)\n        q = q.unsqueeze(ctx.dim)\n        dX -= q * gppr\n        return dX, None, None\n\n\ndef entmax15(X, dim=-1, k=None):\n    \"\"\"1.5-entmax: normalizing sparse transform (a la softmax).\n    Solves the optimization problem:\n        max_p <x, p> - H_1.5(p)    s.t.    p >= 0, sum(p) == 1.\n    where H_1.5(p) is the Tsallis alpha-entropy with alpha=1.5.\n    Parameters\n    ----------\n    X : torch.Tensor\n        The input tensor.\n    dim : int\n        The dimension along which to apply 1.5-entmax.\n    k : int or None\n        number of largest elements to partial-sort over. 
For optimal\n        performance, should be slightly bigger than the expected number of\n        nonzeros in the solution. If the solution is more than k-sparse,\n        this function is recursively called with a 2*k schedule.\n        If `None`, full sorting is performed from the beginning.\n    Returns\n    -------\n    P : torch tensor, same shape as X\n        The projection result, such that P.sum(dim=dim) == 1 elementwise.\n    \"\"\"\n\n    return Entmax15Function.apply(X, dim, k)\n\n\ndef _sparsemax_threshold_and_support(X, dim=-1, k=None):\n    \"\"\"Core computation for sparsemax: optimal threshold and support size.\n    Parameters\n    ----------\n    X : torch.Tensor\n        The input tensor to compute thresholds over.\n    dim : int\n        The dimension along which to apply sparsemax.\n    k : int or None\n        number of largest elements to partial-sort over. For optimal\n        performance, should be slightly bigger than the expected number of\n        nonzeros in the solution. 
If the solution is more than k-sparse,\n        this function is recursively called with a 2*k schedule.\n        If `None`, full sorting is performed from the beginning.\n    Returns\n    -------\n    tau : torch.Tensor like `X`, with all but the `dim` dimension intact\n        the threshold value for each vector\n    support_size : torch LongTensor, shape like `tau`\n        the number of nonzeros in each vector.\n    \"\"\"\n\n    if k is None or k >= X.shape[dim]:  # do full sort\n        topk, _ = torch.sort(X, dim=dim, descending=True)\n    else:\n        topk, _ = torch.topk(X, k=k, dim=dim)\n\n    topk_cumsum = topk.cumsum(dim) - 1\n    rhos = _make_ix_like(topk, dim)\n    support = rhos * topk > topk_cumsum\n\n    support_size = support.sum(dim=dim).unsqueeze(dim)\n    tau = topk_cumsum.gather(dim, support_size - 1)\n    tau /= support_size.to(X.dtype)\n\n    if k is not None and k < X.shape[dim]:\n        unsolved = (support_size == k).squeeze(dim)\n\n        if torch.any(unsolved):\n            in_ = _roll_last(X, dim)[unsolved]\n            tau_, ss_ = _sparsemax_threshold_and_support(in_, dim=-1, k=2 * k)\n            _roll_last(tau, dim)[unsolved] = tau_\n            _roll_last(support_size, dim)[unsolved] = ss_\n\n    return tau, support_size\n\n\nclass SparsemaxFunction(Function):\n    @classmethod\n    def forward(cls, ctx, X, dim=-1, k=None):\n        ctx.dim = dim\n        max_val, _ = X.max(dim=dim, keepdim=True)\n        X = X - max_val  # same numerical stability trick as softmax\n        tau, supp_size = _sparsemax_threshold_and_support(X, dim=dim, k=k)\n        output = torch.clamp(X - tau, min=0)\n        ctx.save_for_backward(supp_size, output)\n        return output\n\n    @classmethod\n    def backward(cls, ctx, grad_output):\n        supp_size, output = ctx.saved_tensors\n        dim = ctx.dim\n        grad_input = grad_output.clone()\n        grad_input[output == 0] = 0\n\n        v_hat = grad_input.sum(dim=dim) / 
supp_size.to(output.dtype).squeeze(dim)\n        v_hat = v_hat.unsqueeze(dim)\n        grad_input = torch.where(output != 0, grad_input - v_hat, grad_input)\n        return grad_input, None, None\n\n\ndef sparsemax(X, dim=-1, k=None):\n    \"\"\"sparsemax: normalizing sparse transform (a la softmax).\n    Solves the projection:\n        min_p ||x - p||_2   s.t.    p >= 0, sum(p) == 1.\n    Parameters\n    ----------\n    X : torch.Tensor\n        The input tensor.\n    dim : int\n        The dimension along which to apply sparsemax.\n    k : int or None\n        number of largest elements to partial-sort over. For optimal\n        performance, should be slightly bigger than the expected number of\n        nonzeros in the solution. If the solution is more than k-sparse,\n        this function is recursively called with a 2*k schedule.\n        If `None`, full sorting is performed from the beginning.\n    Returns\n    -------\n    P : torch tensor, same shape as X\n        The projection result, such that P.sum(dim=dim) == 1 elementwise.\n    \"\"\"\n\n    return SparsemaxFunction.apply(X, dim, k)\n\n\nclass Sparsemax(nn.Module):\n    def __init__(self, dim=-1, k=None):\n        \"\"\"sparsemax: normalizing sparse transform (a la softmax).\n        Solves the projection:\n            min_p ||x - p||_2   s.t.    p >= 0, sum(p) == 1.\n        Parameters\n        ----------\n        dim : int\n            The dimension along which to apply sparsemax.\n        k : int or None\n            number of largest elements to partial-sort over. For optimal\n            performance, should be slightly bigger than the expected number of\n            nonzeros in the solution. 
If the solution is more than k-sparse,\n            this function is recursively called with a 2*k schedule.\n            If `None`, full sorting is performed from the beginning.\n        \"\"\"\n        self.dim = dim\n        self.k = k\n        super(Sparsemax, self).__init__()\n\n    def forward(self, X):\n        return sparsemax(X, dim=self.dim, k=self.k)\n\n\ndef swish(x):\n    return x * torch.sigmoid(x)\n\n\nactivation_dict = {\"ReLU\": torch.nn.ReLU(), \"Softplus\": torch.nn.Softplus(), \"Swish\": swish,\n                   \"entmax\": entmax15, \"sparsemax\": sparsemax, \"Softmax\": torch.nn.Softmax}\n\n\n\ndef gelu(x):\n    return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))\n\n\ndef swish(x):\n    return x * torch.sigmoid(x)\n\nACT_FNS = {\n    'relu': nn.ReLU(),\n    'swish': swish,\n    'gelu': gelu\n}\n\n\nclass Attention(nn.Module):\n    def __init__(self, n_head, n_embd, win_len, scale, q_len, sub_len, sparse=True, attn_pdrop=0.1, resid_pdrop=0.1):\n        super(Attention, self).__init__()\n\n        if(sparse):\n            print('Activate log sparse!')\n            mask = self.log_mask(win_len, sub_len)\n        else:\n            mask = torch.tril(torch.ones(win_len, win_len)).view(1, 1, win_len, win_len)\n\n        self.register_buffer('mask_tri', mask)\n        self.n_head = n_head\n        self.split_size = n_embd * self.n_head\n        self.scale = scale\n        self.q_len = q_len\n        self.query_key = nn.Conv1d(n_embd, n_embd * n_head * 2, self.q_len)\n        self.value = Conv1D(n_embd * n_head, 1, n_embd)\n        self.c_proj = Conv1D(n_embd, 1, n_embd * self.n_head)\n        self.attn_dropout = nn.Dropout(attn_pdrop)\n        self.resid_dropout = nn.Dropout(resid_pdrop)\n\n    def log_mask(self, win_len, sub_len):\n        mask = torch.zeros((win_len, win_len), dtype=torch.float)\n        for i in range(win_len):\n            mask[i] = self.row_mask(i, sub_len, win_len)\n        return 
mask.view(1, 1, mask.size(0), mask.size(1))\n\n    def row_mask(self, index, sub_len, win_len):\n        \"\"\"\n        Remark:\n        1 . Currently, dense matrices with sparse multiplication are not supported by Pytorch. Efficient implementation\n            should deal with CUDA kernel, which we haven't implemented yet.\n\n        2 . Our default setting here use Local attention and Restart attention.\n\n        3 . For index-th row, if its past is smaller than the number of cells the last\n            cell can attend, we can allow current cell to attend all past cells to fully\n            utilize parallel computing in dense matrices with sparse multiplication.\"\"\"\n        log_l = math.ceil(np.log2(sub_len))\n        mask = torch.zeros((win_len), dtype=torch.float)\n        if((win_len // sub_len) * 2 * (log_l) > index):\n            mask[:(index + 1)] = 1\n        else:\n            while(index >= 0):\n                if((index - log_l + 1) < 0):\n                    mask[:index] = 1\n                    break\n                mask[index - log_l + 1:(index + 1)] = 1  # Local attention\n                for i in range(0, log_l):\n                    new_index = index - log_l + 1 - 2**i\n                    if((index - new_index) <= sub_len and new_index >= 0):\n                        mask[new_index] = 1\n                index -= sub_len\n        return mask\n\n    def attn(self, query: torch.Tensor, key, value: torch.Tensor, activation=\"Softmax\"):\n        activation = activation_dict[activation](dim=-1)\n        pre_att = torch.matmul(query, key)\n        if self.scale:\n            pre_att = pre_att / math.sqrt(value.size(-1))\n        mask = self.mask_tri[:, :, :pre_att.size(-2), :pre_att.size(-1)]\n        pre_att = pre_att * mask + -1e9 * (1 - mask)\n        pre_att = activation(pre_att)\n        pre_att = self.attn_dropout(pre_att)\n        attn = torch.matmul(pre_att, value)\n\n        return attn\n\n    def merge_heads(self, x):\n        x = 
x.permute(0, 2, 1, 3).contiguous()\n        new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)\n        return x.view(*new_x_shape)\n\n    def split_heads(self, x, k=False):\n        new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)\n        x = x.view(*new_x_shape)\n        if k:\n            return x.permute(0, 2, 3, 1)\n        else:\n            return x.permute(0, 2, 1, 3)\n\n    def forward(self, x):\n\n        value = self.value(x)\n        qk_x = nn.functional.pad(x.permute(0, 2, 1), pad=(self.q_len - 1, 0))\n        query_key = self.query_key(qk_x).permute(0, 2, 1)\n        query, key = query_key.split(self.split_size, dim=2)\n        query = self.split_heads(query)\n        key = self.split_heads(key, k=True)\n        value = self.split_heads(value)\n        attn = self.attn(query, key, value)\n        attn = self.merge_heads(attn)\n        attn = self.c_proj(attn)\n        attn = self.resid_dropout(attn)\n        return attn\n\n\nclass Conv1D(nn.Module):\n    def __init__(self, out_dim, rf, in_dim):\n        super(Conv1D, self).__init__()\n        self.rf = rf\n        self.out_dim = out_dim\n        if rf == 1:\n            w = torch.empty(in_dim, out_dim)\n            nn.init.normal_(w, std=0.02)\n            self.w = Parameter(w)\n            self.b = Parameter(torch.zeros(out_dim))\n        else:\n            raise NotImplementedError\n\n    def forward(self, x):\n        if self.rf == 1:\n            size_out = x.size()[:-1] + (self.out_dim,)\n            # print(\"test.shape = \", self.b.shape, x.view(-1, x.size(-1)).shape, self.w.shape, x.shape)\n            # if x.size(-1) > self.w.shape[0]:\n            #     # min_len = min(x.size(-1), self.w.shape[0])\n            #     self.w = torch.nn.functional.pad(self.w, (0, 0, 0, (x.size(-1)-self.w.shape[0])), \"constant\", 0)\n            #     self.w = torch.nn.Parameter(self.w)  # 转换为 torch.nn.Parameter\n            #     x = torch.addmm(self.b, x.view(-1, x.size(-1)), 
self.w)\n            # else:\n            x = torch.addmm(self.b, x.view(-1, x.size(-1)), self.w)\n            x = x.view(*size_out)\n        else:\n            raise NotImplementedError\n        return x\n\n\nclass LayerNorm(nn.Module):\n    \"Construct a layernorm module in the OpenAI style (epsilon inside the square root).\"\n\n    def __init__(self, n_embd, e=1e-5):\n        super(LayerNorm, self).__init__()\n        self.g = nn.Parameter(torch.ones(n_embd))\n        self.b = nn.Parameter(torch.zeros(n_embd))\n        self.e = e\n\n    def forward(self, x):\n        mu = x.mean(-1, keepdim=True)\n        sigma = (x - mu).pow(2).mean(-1, keepdim=True)\n        x = (x - mu) / torch.sqrt(sigma + self.e)\n        return self.g * x + self.b\n\n\nclass MLP(nn.Module):\n    def __init__(self, n_state, n_embd, acf='relu'):\n        super(MLP, self).__init__()\n        n_embd = n_embd\n        self.c_fc = Conv1D(n_state, 1, n_embd)\n        self.c_proj = Conv1D(n_embd, 1, n_state)\n        self.act = ACT_FNS[acf]\n        self.dropout = nn.Dropout(0.1)\n\n    def forward(self, x):\n        hidden1 = self.act(self.c_fc(x))\n        hidden2 = self.c_proj(hidden1)\n        return self.dropout(hidden2)\n\n\nclass Block(nn.Module):\n    def __init__(self, n_head, win_len, n_embd, scale, q_len, sub_len):\n        super(Block, self).__init__()\n        n_embd = n_embd\n        self.attn = Attention(n_head, n_embd, win_len, scale, q_len, sub_len)\n        self.ln_1 = LayerNorm(n_embd)\n        self.mlp = MLP(4 * n_embd, n_embd)\n        self.ln_2 = LayerNorm(n_embd)\n\n    def forward(self, x):\n        attn = self.attn(x)\n        ln1 = self.ln_1(x + attn)\n        mlp = self.mlp(ln1)\n        hidden = self.ln_2(ln1 + mlp)\n        return hidden\n\n\nclass TransformerModel(nn.Module):\n    \"\"\" Transformer model \"\"\"\n\n    def __init__(self, n_time_series, n_head, sub_len, num_layer, n_embd,\n                 forecast_history: int, dropout: float, scale_att, q_len, 
seq_num=None):\n        super(TransformerModel, self).__init__()\n        self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n        self.input_dim = n_time_series\n        self.n_head = n_head\n        self.seq_num = None\n        if seq_num:\n            self.seq_num = seq_num\n            self.id_embed = nn.Embedding(seq_num, n_embd)\n            nn.init.normal_(self.id_embed.weight, std=0.02)\n        self.n_embd = n_embd\n        self.win_len = forecast_history\n        # The following is the implementation of this paragraph\n        \"\"\" For positional encoding in Transformer, we use learnable position embedding.\n        For covariates, following [3], we use all or part of year, month, day-of-the-week,\n        hour-of-the-day, minute-of-the-hour, age and time-series-ID according to the granularities of datasets.\n        age is the distance to the first observation in that time series [3]. Each of them except time series\n        ID has only one dimension and is normalized to have zero mean and unit variance (if applicable).\n        \"\"\"\n        self.po_embed = nn.Embedding(forecast_history, n_embd)\n        self.drop_em = nn.Dropout(dropout)\n        block = Block(n_head, forecast_history, n_embd + n_time_series, scale=scale_att,\n                      q_len=q_len, sub_len=sub_len)\n        self.blocks = nn.ModuleList([copy.deepcopy(block) for _ in range(num_layer)])\n        nn.init.normal_(self.po_embed.weight, std=0.02)\n\n    def forward(self, series_id: int, x: torch.Tensor):\n        \"\"\"Runs  forward pass of the DecoderTransformer model.\n\n        :param series_id:   ID of the time series\n        :type series_id: int\n        :param x: [description]\n        :type x: torch.Tensor\n        :return: [description]\n        :rtype: [type]\n        \"\"\"\n        batch_size = x.size(0)\n        length = x.size(1)  # (Batch_size, length, input_dim)\n        embedding_sum = torch.zeros(batch_size, length, 
self.n_embd).to(self.device)\n        if self.seq_num:\n            embedding_sum = torch.zeros(batch_size, length)\n            embedding_sum = embedding_sum.fill_(series_id).type(torch.LongTensor).to(self.device)\n            embedding_sum = self.id_embed(embedding_sum)\n        #print(\"shape below\")\n        #print(embedding_sum.shape)\n        #print(x.shape)\n        #print(series_id)\n        position = torch.tensor(torch.arange(length), dtype=torch.long).to(self.device)\n        po_embedding = self.po_embed(position)\n        embedding_sum[:] = po_embedding\n        x = torch.cat((x, embedding_sum), dim=2)\n        for block in self.blocks:\n            x = block(x)\n        return x\n\n\nclass Model(nn.Module):\n    def __init__(self, configs):\n        super(Model, self).__init__()\n        n_time_series = configs.enc_in\n        n_head = configs.n_heads\n        num_layer = configs.e_layers\n        n_embd = configs.d_model // configs.n_heads\n        forecast_history = configs.seq_len\n        dropout = configs.dropout\n        q_len = configs.seq_len // 6\n        activation = configs.activation\n        forecast_length = configs.pred_len\n        scale_att: bool = False\n        seq_num1=None\n        sub_len=1\n        mu=None\n        \"\"\"\n        Args:\n            n_time_series: Number of time series present in input\n            n_head: Number of heads in the MultiHeadAttention mechanism\n            seq_num: The number of targets to forecast\n            sub_len: sub_len of the sparse attention\n            num_layer: The number of transformer blocks in the model.\n            n_embd: The dimention of Position embedding and time series ID embedding\n            forecast_history: The number of historical steps fed into the time series model\n            dropout: The dropout for the embedding of the model.\n            additional_params: Additional parameters used to initalize the attention model. 
Can inc\n        \"\"\"\n        super(Model, self).__init__()\n        print(\"trans para = \", n_time_series, n_head, sub_len, num_layer, n_embd, forecast_history, dropout, scale_att, q_len)\n        self.transformer = TransformerModel(n_time_series, n_head, sub_len, num_layer, n_embd, forecast_history, dropout, scale_att, q_len, seq_num=seq_num1)\n        self.softplus = nn.Softplus()\n        self.mu = torch.nn.Linear(n_time_series + n_embd, n_time_series, bias=True)\n        self.sigma = torch.nn.Linear(n_time_series + n_embd, n_time_series, bias=True)\n        self._initialize_weights()\n        self.mu_mode = mu\n        self.forecast_len_layer = None\n        if forecast_length:\n            self.forecast_len_layer = nn.Linear(forecast_history, forecast_length)\n\n    def _initialize_weights(self):\n        for m in self.modules():\n            if isinstance(m, nn.Conv1d):\n                nn.init.normal_(m.weight, 0, 0.01)\n                if m.bias is not None:\n                    nn.init.constant_(m.bias, 0)\n            elif isinstance(m, nn.Linear):\n                nn.init.normal_(m.weight, 0, 0.01)\n                nn.init.constant_(m.bias, 0)\n\n    def forward(self, x: torch.Tensor, x_mark_enc, x_dec, x_mark_dec,\n                enc_self_mask=None, dec_self_mask=None, dec_enc_mask=None, series_id: int = None):\n        \"\"\"\n        Args:\n            x: Tensor of dimension (batch_size, seq_len, number_of_time_series)\n            series_id: Optional id of the series in the dataframe. 
Currently  not supported\n        Returns:\n            Case 1: tensor of dimension (batch_size, forecast_length)\n            Case 2: GLoss sigma and mu: tuple of ((batch_size, forecast_history, 1), (batch_size, forecast_history, 1))\n        \"\"\"\n        # print(\"x.shape = \", x.shape)\n        # print(\" self.transformer = \",  self.transformer)\n        h = self.transformer(series_id, x)\n        # print(\"h.shape\")\n        mu = self.mu(h)\n        sigma = self.sigma(h)\n        if self.mu_mode:\n            sigma = self.softplus(sigma)\n            return mu, sigma\n        if self.forecast_len_layer:\n            # Swap to (batch_size, 1, features) for linear layer\n            sigma = sigma.permute(0, 2, 1)\n            # Output (batch_size, forecast_len_)\n            sigma = self.forecast_len_layer(sigma).permute(0, 2, 1)\n        return sigma\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/models/PatchTST.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nfrom einops import rearrange\nfrom math import sqrt\nfrom Other_baselines.layers.Embed import DataEmbedding_wo_time\n\n\ndef l2norm(t):\n    return F.normalize(t, dim = -1)\n\nclass AttentionLayer(nn.Module):\n    def __init__(self, attention, d_model, n_heads, d_keys=None, d_values=None):\n        super(AttentionLayer, self).__init__()\n        \n        d_keys = d_keys or (d_model // n_heads)\n        d_values = d_values or (d_model // n_heads)\n\n        self.inner_attention = attention\n        self.query_projection = nn.Linear(d_model, d_keys * n_heads)\n        self.key_projection = nn.Linear(d_model, d_keys * n_heads)\n        self.value_projection = nn.Linear(d_model, d_values * n_heads)\n        self.out_projection = nn.Linear(d_values * n_heads, d_model)\n        self.n_heads = n_heads\n\n    def forward(self, queries, keys, values, attn_mask, attn_bias):\n        B, L, _ = queries.shape\n        _, S, _ = keys.shape\n        H = self.n_heads\n        \n        queries = self.query_projection(queries).view(B, L, H, -1)\n        keys = self.key_projection(keys).view(B, S, H, -1)\n        values = self.value_projection(values).view(B, S, H, -1)\n\n        out, attn = self.inner_attention(\n            queries,\n            keys,\n            values,\n            attn_mask,\n            attn_bias\n        )\n        out = out.view(B, L, -1)\n\n        return self.out_projection(out), attn\n\nclass FullAttention(nn.Module):\n    def __init__(self, mask_flag=True, factor=5, scale=None, attention_dropout=0.1,\n                 output_attention=False, configs=None,\n                 attn_scale_init=20):\n        super(FullAttention, self).__init__()\n        self.mask_flag = mask_flag\n        self.output_attention = output_attention\n        self.dropout = nn.Dropout(attention_dropout)\n\n        self.enc_in = configs.enc_in\n       \n        self.scale = 
scale\n\n    def forward(self, queries, keys, values, attn_mask, attn_bias):\n        B, L, H, E = queries.shape\n        _, S, _, D = values.shape\n        scale = self.scale or 1. / sqrt(E)\n        \n        scores = torch.einsum(\"blhe,bshe->bhls\", queries, keys)\n\n        if self.mask_flag:\n            # if attn_mask is None:\n            #     attn_mask = TriangularCausalMask(B, L, device=queries.device)\n            scores.masked_fill_(attn_mask.mask, -np.inf)\n\n        if attn_bias is not None:\n            attn_bias = attn_bias.permute(0, 3, 1, 2)\n            A = self.dropout(torch.softmax(scores * scale + attn_bias, dim=-1))\n        else:\n            A = self.dropout(torch.softmax(scores * scale, dim=-1))\n        V = torch.einsum(\"bhls,bshd->blhd\", A, values)\n\n        if self.output_attention:\n            return (V.contiguous(), A)\n        else:\n            return (V.contiguous(), None)\n\nclass EncoderLayer(nn.Module):\n    def __init__(self, attention, d_model, d_ff=None, dropout=0.1, activation=\"relu\"):\n        super(EncoderLayer, self).__init__()\n        d_ff = d_ff or 4 * d_model\n        self.attention = attention\n        self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1)\n        self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1)\n        self.norm1 = nn.BatchNorm1d(d_model)\n        self.norm2 = nn.BatchNorm1d(d_model)\n        self.dropout = nn.Dropout(dropout)\n        self.activation = F.relu if activation == \"relu\" else F.gelu\n\n    def forward(self, x, attn_mask=None, attn_bias=None):\n        new_x, attn = self.attention(\n            x, x, x,\n            attn_mask=attn_mask,\n            attn_bias=attn_bias\n        )\n        x = x + self.dropout(new_x)\n        y = x = self.norm1(x.permute(0, 2, 1)).permute(0, 2, 1)\n        y = self.dropout(self.activation(self.conv1(y.transpose(-1, 1))))\n        y = self.dropout(self.conv2(y).transpose(-1, 1))\n        y = x 
+ y\n        y = self.norm2(y.permute(0, 2, 1)).permute(0, 2, 1)\n        return y, attn\n\nclass Encoder(nn.Module):\n    def __init__(self, attn_layers, conv_layers=None, norm_layer=None):\n        super(Encoder, self).__init__()\n        self.attn_layers = nn.ModuleList(attn_layers)\n        self.conv_layers = nn.ModuleList(conv_layers) if conv_layers is not None else None\n        self.norm = norm_layer\n\n    def forward(self, x, attn_mask=None, attn_bias=None):\n        # x [B, L, D]\n        attns = []\n        if self.conv_layers is not None:\n            for attn_layer, conv_layer in zip(self.attn_layers, self.conv_layers):\n                x, attn = attn_layer(x, attn_mask=attn_mask)\n                x = conv_layer(x)\n                attns.append(attn)\n            x, attn = self.attn_layers[-1](x)\n            attns.append(attn)\n        else:\n            for attn_layer in self.attn_layers:\n                x, attn = attn_layer(x, attn_mask=attn_mask, attn_bias=attn_bias)\n                attns.append(attn)\n\n        if self.norm is not None:\n            # x = self.norm(x)\n            x = self.norm(x.permute(0, 2, 1)).permute(0, 2, 1)\n\n        return x, attns\n\nclass PatchTST(nn.Module):\n    \"\"\"\n    Vanilla Transformer with O(L^2) complexity\n    \"\"\"\n    def __init__(self, configs, device):\n        super(PatchTST, self).__init__()\n\n        self.enc_in = configs.enc_in\n        self.patch_size = configs.patch_size\n        self.stride = configs.stride\n        self.patch_num = (configs.seq_len - self.patch_size) // self.stride + 1\n\n        self.label_len = configs.label_len\n        self.seq_len = configs.seq_len\n        self.pred_len = configs.pred_len\n        self.output_attention = False\n        self.num_heads = configs.n_heads\n        self.factor = 3\n        self.activation = 'gelu'\n        \n        # Embedding\n        self.enc_embedding = DataEmbedding_wo_time(self.patch_size, \n                                           
 configs.d_model, configs.embed, configs.freq, configs.dropout)\n\n        # Encoder\n        self.encoder = Encoder(\n            [\n                EncoderLayer(\n                    AttentionLayer(\n                        FullAttention(False, self.factor, attention_dropout=configs.dropout,\n                                      output_attention=self.output_attention,\n                                      configs=configs),\n                                      configs.d_model, configs.n_heads),\n                    configs.d_model,\n                    configs.d_ff,\n                    dropout=configs.dropout,\n                    activation=self.activation\n                ) for l in range(configs.e_layers)\n            ],\n            # norm_layer=torch.nn.LayerNorm(configs.d_model)\n            norm_layer=torch.nn.BatchNorm1d(configs.d_model)\n        )\n        \n        self.proj = nn.Linear(configs.d_model * self.patch_num, configs.pred_len, bias=True)\n        self.cnt = 0\n    \n    def forward(self, x_enc, itr):\n        B, L, M = x_enc.shape\n\n        means = x_enc.mean(1, keepdim=True).detach()\n        x_enc = x_enc - means\n        stdev = torch.sqrt(torch.var(x_enc, dim=1, keepdim=True, unbiased=False)+ 1e-5).detach() \n        x_enc /= stdev\n\n        x_enc = rearrange(x_enc, 'b l m -> b m l')\n        x_enc = x_enc.unfold(dimension=-1, size=self.patch_size, step=self.stride)\n        x_enc = rearrange(x_enc, 'b m n p -> (b m) n p')\n\n        enc_out = self.enc_embedding(x_enc)\n\n        enc_out, attns = self.encoder(enc_out, attn_mask=None)\n        \n        enc_out = self.proj(enc_out.reshape(B*M, -1))\n        enc_out = rearrange(enc_out, '(b m) l -> b l m', m=M)\n        # revin\n        enc_out = enc_out[:, -self.pred_len:, :]\n        enc_out = enc_out * stdev\n        enc_out = enc_out + means\n\n        x_enc = enc_out * stdev\n        x_enc = enc_out + means\n\n\n        if self.output_attention:\n            return enc_out, 
attns\n        else:\n            return enc_out  # [B, L, D]\n\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/models/PatchTST_raw.py",
    "content": "# Cell\nfrom typing import Callable, Optional\nimport torch\nfrom torch import nn\nfrom torch import Tensor\nimport torch.nn.functional as F\nimport numpy as np\n\nfrom Other_baselines.layers.PatchTST_backbone import PatchTST_backbone\nfrom Other_baselines.layers.PatchTST_layers import series_decomp\n\n\nclass Model(nn.Module):\n    def __init__(self, configs, max_seq_len: Optional[int] = 1024, d_k: Optional[int] = None, d_v: Optional[int] = None,\n                 norm: str = 'BatchNorm', attn_dropout: float = 0.,\n                 act: str = \"gelu\", key_padding_mask: bool = 'auto', padding_var: Optional[int] = None,\n                 attn_mask: Optional[Tensor] = None, res_attention: bool = True,\n                 pre_norm: bool = False, store_attn: bool = False, pe: str = 'zeros', learn_pe: bool = True,\n                 pretrain_head: bool = False, head_type='flatten', verbose: bool = False, **kwargs):\n\n        super().__init__()\n\n        # load parameters\n        c_in = configs.enc_in\n        context_window = configs.seq_len\n        target_window = configs.pred_len\n\n        n_layers = configs.e_layers\n        n_heads = configs.n_heads\n        d_model = configs.d_model\n        d_ff = configs.d_ff\n        dropout = configs.dropout\n        fc_dropout = configs.fc_dropout\n        head_dropout = configs.head_dropout\n\n        individual = configs.individual\n\n        patch_len = configs.patch_len\n        stride = configs.stride\n        padding_patch = configs.padding_patch\n\n        revin = configs.revin\n        affine = configs.affine\n        subtract_last = configs.subtract_last\n\n        decomposition = configs.decomposition\n        kernel_size = configs.kernel_size\n\n        # model\n        self.decomposition = decomposition\n        if self.decomposition:\n            self.decomp_module = series_decomp(kernel_size)\n            self.model_trend = PatchTST_backbone(c_in=c_in, context_window=context_window, 
target_window=target_window,\n                                                 patch_len=patch_len, stride=stride,\n                                                 max_seq_len=max_seq_len, n_layers=n_layers, d_model=d_model,\n                                                 n_heads=n_heads, d_k=d_k, d_v=d_v, d_ff=d_ff, norm=norm,\n                                                 attn_dropout=attn_dropout,\n                                                 dropout=dropout, act=act, key_padding_mask=key_padding_mask,\n                                                 padding_var=padding_var,\n                                                 attn_mask=attn_mask, res_attention=res_attention, pre_norm=pre_norm,\n                                                 store_attn=store_attn,\n                                                 pe=pe, learn_pe=learn_pe, fc_dropout=fc_dropout,\n                                                 head_dropout=head_dropout, padding_patch=padding_patch,\n                                                 pretrain_head=pretrain_head, head_type=head_type,\n                                                 individual=individual, revin=revin, affine=affine,\n                                                 subtract_last=subtract_last, verbose=verbose, **kwargs)\n            self.model_res = PatchTST_backbone(c_in=c_in, context_window=context_window, target_window=target_window,\n                                               patch_len=patch_len, stride=stride,\n                                               max_seq_len=max_seq_len, n_layers=n_layers, d_model=d_model,\n                                               n_heads=n_heads, d_k=d_k, d_v=d_v, d_ff=d_ff, norm=norm,\n                                               attn_dropout=attn_dropout,\n                                               dropout=dropout, act=act, key_padding_mask=key_padding_mask,\n                                               padding_var=padding_var,\n                         
                      attn_mask=attn_mask, res_attention=res_attention, pre_norm=pre_norm,\n                                               store_attn=store_attn,\n                                               pe=pe, learn_pe=learn_pe, fc_dropout=fc_dropout,\n                                               head_dropout=head_dropout, padding_patch=padding_patch,\n                                               pretrain_head=pretrain_head, head_type=head_type, individual=individual,\n                                               revin=revin, affine=affine,\n                                               subtract_last=subtract_last, verbose=verbose, **kwargs)\n        else:\n            self.model = PatchTST_backbone(c_in=c_in, context_window=context_window, target_window=target_window,\n                                           patch_len=patch_len, stride=stride,\n                                           max_seq_len=max_seq_len, n_layers=n_layers, d_model=d_model,\n                                           n_heads=n_heads, d_k=d_k, d_v=d_v, d_ff=d_ff, norm=norm,\n                                           attn_dropout=attn_dropout,\n                                           dropout=dropout, act=act, key_padding_mask=key_padding_mask,\n                                           padding_var=padding_var,\n                                           attn_mask=attn_mask, res_attention=res_attention, pre_norm=pre_norm,\n                                           store_attn=store_attn,\n                                           pe=pe, learn_pe=learn_pe, fc_dropout=fc_dropout, head_dropout=head_dropout,\n                                           padding_patch=padding_patch,\n                                           pretrain_head=pretrain_head, head_type=head_type, individual=individual,\n                                           revin=revin, affine=affine,\n                                           subtract_last=subtract_last, verbose=verbose, **kwargs)\n\n    def 
forward(self, x):  # x: [Batch, Input length, Channel]\n        if self.decomposition:\n            res_init, trend_init = self.decomp_module(x)\n            res_init, trend_init = res_init.permute(0, 2, 1), trend_init.permute(0, 2,\n                                                                                 1)  # x: [Batch, Channel, Input length]\n            res = self.model_res(res_init)\n            trend = self.model_trend(trend_init)\n            x = res + trend\n            x = x.permute(0, 2, 1)  # x: [Batch, Input length, Channel]\n        else:\n            x = x.permute(0, 2, 1)  # x: [Batch, Channel, Input length]\n            x = self.model(x)\n            x = x.permute(0, 2, 1)  # x: [Batch, Input length, Channel]\n        return x"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/models/TCN.py",
    "content": "import torch\nimport torch.nn as nn\nimport os\n\nfrom transformers.models.gpt2.modeling_gpt2 import GPT2Model\nfrom einops import rearrange\nfrom transformers.models.gpt2.configuration_gpt2 import GPT2Config\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn.utils import weight_norm\n\n\nclass Chomp1d(nn.Module):\n    def __init__(self, chomp_size):\n        super(Chomp1d, self).__init__()\n        self.chomp_size = chomp_size\n\n    def forward(self, x):\n        return x[:, :, :-self.chomp_size].contiguous()\n\n\nclass TemporalBlock(nn.Module):\n    def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation, padding, dropout=0.2):\n        super(TemporalBlock, self).__init__()\n        self.conv1 = weight_norm(nn.Conv1d(n_inputs, n_outputs, kernel_size,\n                                           stride=stride, padding=padding, dilation=dilation))\n        self.chomp1 = Chomp1d(padding)\n        self.relu1 = nn.ReLU()\n        self.dropout1 = nn.Dropout(dropout)\n\n        self.conv2 = weight_norm(nn.Conv1d(n_outputs, n_outputs, kernel_size,\n                                           stride=stride, padding=padding, dilation=dilation))\n        self.chomp2 = Chomp1d(padding)\n        self.relu2 = nn.ReLU()\n        self.dropout2 = nn.Dropout(dropout)\n\n        self.net = nn.Sequential(self.conv1, self.chomp1, self.relu1, self.dropout1,\n                                 self.conv2, self.chomp2, self.relu2, self.dropout2)\n        self.downsample = nn.Conv1d(n_inputs, n_outputs, 1) if n_inputs != n_outputs else None\n        self.relu = nn.ReLU()\n        self.init_weights()\n\n    def init_weights(self):\n        self.conv1.weight.data.normal_(0, 0.01)\n        self.conv2.weight.data.normal_(0, 0.01)\n        if self.downsample is not None:\n            self.downsample.weight.data.normal_(0, 0.01)\n\n    def forward(self, x):\n        out = self.net(x)\n        res = x if self.downsample is None else self.downsample(x)\n        
return self.relu(out + res)\n\n\nclass TemporalConvNet(nn.Module):\n    def __init__(self, num_inputs, num_channels, kernel_size=2, dropout=0.2):\n        super(TemporalConvNet, self).__init__()\n        layers = []\n        num_levels = len(num_channels)\n        for i in range(num_levels):\n            dilation_size = 2 ** i\n            in_channels = num_inputs if i == 0 else num_channels[i-1]\n            out_channels = num_channels[i]\n            layers += [TemporalBlock(in_channels, out_channels, kernel_size, stride=1, dilation=dilation_size,\n                                     padding=(kernel_size-1) * dilation_size, dropout=dropout)]\n\n        self.network = nn.Sequential(*layers)\n\n    def forward(self, x):\n        return self.network(x)\n\n\n\nclass Model(nn.Module):\n\n    def __init__(self, configs):\n        super(Model, self).__init__()\n        # self.is_gpt = configs.is_gpt\n        # self.patch_size = configs.patch_size\n        # self.pretrain = configs.pretrain\n        # self.stride = configs.stride\n\n        # self.encoder = nn.Embedding(configs.pred_len, configs.input_size)\n        self.tcn = TemporalConvNet(configs.input_size, configs.num_channels, kernel_size=configs.kernel_size, dropout=configs.dropout)\n        self.decoder = nn.Linear(configs.input_size, configs.pred_len)\n\n        self.init_weights()\n\n    def init_weights(self):\n        self.decoder.weight.data.normal_(0, 0.01)\n\n    def forward(self, x, x_mark_enc=None, x_dec=None, x_mark_dec=None, mask=None):\n        # input has dimension (N, L_in), and emb has dimension (N, L_in, C_in)\n        # print(\"x.shape = \", x.shape)\n        # print(\"self.tcn = \", self.tcn)\n        # emb = self.drop(self.encoder(x))\n        y = self.tcn(x)\n        # print(\"y.shape = \", y.shape, y[:, :, -1].shape)\n        # print(\"self.decoder = \", self.decoder)\n        result = []\n        for i in range(y.shape[-1]):\n            o = self.decoder(y[:, :, i])\n            
result.append(o.contiguous().unsqueeze(-1))\n        result = torch.cat(result, dim=2)\n        # print(\"result.shape = \", result.shape)\n        return result\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/models/TEMPO.py",
    "content": "import numpy as np\nimport torch\nimport torch.nn as nn\nimport os\nfrom transformers.models.gpt2.modeling_gpt2 import GPT2Model\nfrom einops import rearrange\nfrom transformers.models.gpt2.configuration_gpt2 import GPT2Config\nfrom transformers import GPT2Tokenizer\nfrom Other_baselines.utils.rev_in import RevIn\nfrom peft import get_peft_config, PeftModel, PeftConfig, get_peft_model, LoraConfig, TaskType\n\n\ncriterion = nn.MSELoss()\n\nclass ComplexLinear(nn.Module):\n    def __init__(self, input_dim, output_dim):\n        super(ComplexLinear, self).__init__()\n        self.fc_real = nn.Linear(input_dim, output_dim)\n        self.fc_imag = nn.Linear(input_dim, output_dim)\n\n    def forward(self, x):\n        x_real = torch.real(x)\n        x_imag = torch.imag(x)\n        out_real = self.fc_real(x_real) - self.fc_imag(x_imag)\n        out_imag = self.fc_real(x_imag) + self.fc_imag(x_real)\n        return torch.complex(out_real, out_imag)\n\n\ndef print_trainable_parameters(model):\n    trainable_params = 0\n    all_param = 0\n    for _, param in model.named_parameters():\n        all_param += param.numel()\n        if param.requires_grad:\n            trainable_params += param.numel()\n    print(\n        f\"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param:.2f}\"\n    )\n\nclass MultiFourier(torch.nn.Module):\n    def __init__(self, N, P):\n        super(MultiFourier, self).__init__()\n        self.N = N\n        self.P = P\n        self.a = torch.nn.Parameter(torch.randn(max(N), len(N)), requires_grad=True)\n        self.b = torch.nn.Parameter(torch.randn(max(N), len(N)), requires_grad=True)\n    \n    def forward(self, t):\n        output = torch.zeros_like(t)\n        t = t.unsqueeze(-1).repeat(1, 1, max(self.N))  # shape: [batch_size, seq_len, max(N)]\n        n = torch.arange(max(self.N)).unsqueeze(0).unsqueeze(0).to(t.device)  # shape: [1, 1, max(N)]\n        for j in 
range(len(self.N)):  # loop over seasonal components\n            # import ipdb; ipdb.set_trace() \n            cos_terms = torch.cos(2 * np.pi * (n[..., :self.N[j]]+1) * t[..., :self.N[j]] / self.P[j])  # shape: [batch_size, seq_len, N[j]]\n            sin_terms = torch.sin(2 * np.pi * (n[..., :self.N[j]]+1) * t[..., :self.N[j]] / self.P[j])  # shape: [batch_size, seq_len, N[j]]\n            output += torch.matmul(cos_terms, self.a[:self.N[j], j]) + torch.matmul(sin_terms, self.b[:self.N[j], j])\n        return output\n\nclass moving_avg(nn.Module):\n    \"\"\"\n    Moving average block to highlight the trend of time series\n    \"\"\"\n    def __init__(self, kernel_size, stride):\n        super(moving_avg, self).__init__()\n        self.kernel_size = kernel_size\n        self.avg = nn.AvgPool1d(kernel_size=kernel_size, stride=stride, padding=0)\n\n    def forward(self, x):\n        # padding on the both ends of time series\n        front = x[:, 0:1, :].repeat(1, (self.kernel_size - 1) // 2, 1)\n        end = x[:, -1:, :].repeat(1, (self.kernel_size - 1) // 2, 1)\n        x = torch.cat([front, x, end], dim=1)\n        x = self.avg(x.permute(0, 2, 1))\n        x = x.permute(0, 2, 1)\n        return x\n\nclass TEMPO(nn.Module):\n    \n    def __init__(self, configs, device):\n        super(TEMPO, self).__init__()\n        self.is_gpt = configs.is_gpt\n        self.patch_size = configs.patch_size\n        self.pretrain = configs.pretrain\n        self.stride = configs.stride\n        self.patch_num = (configs.seq_len - self.patch_size) // self.stride + 1\n        self.mul_season = MultiFourier([2], [24*4]) #, [ 24, 24*4])\n\n        self.padding_patch_layer = nn.ReplicationPad1d((0, self.stride)) \n        self.patch_num += 1\n        # self.mlp = configs.mlp\n\n        self.map_trend = nn.Linear(configs.seq_len, configs.seq_len)\n        self.map_season  = nn.Sequential(\n            nn.Linear(configs.seq_len, 4*configs.seq_len),\n            nn.ReLU(),\n            
nn.Linear(4*configs.seq_len, configs.seq_len)\n        )\n\n        # #self.map_season = nn.Linear(configs.seq_len, configs.seq_len)\n        self.map_resid = nn.Linear(configs.seq_len, configs.seq_len)\n\n        kernel_size = 25\n        self.moving_avg = moving_avg(kernel_size, stride=1)\n\n        \n        if configs.is_gpt:\n            if configs.pretrain:\n\n                if not os.path.exists(\"/dev_data/lz/gpt2\"):\n                    self.gpt2_trend = GPT2Model.from_pretrained('/SSD/lz/gpt2', output_attentions=True,\n                                                          output_hidden_states=True)\n                else:\n                    self.gpt2_trend = GPT2Model.from_pretrained('/dev_data/lz/gpt2', output_attentions=True,\n                                                          output_hidden_states=True)\n\n                # self.gpt2_trend = GPT2Model.from_pretrained('gpt2', output_attentions=True, output_hidden_states=True)  # loads a pretrained GPT-2 base model\n                # self.gpt2_season = GPT2Model.from_pretrained('gpt2', output_attentions=True, output_hidden_states=True)  # loads a pretrained GPT-2 base model\n                # self.gpt2_noise = GPT2Model.from_pretrained('gpt2', output_attentions=True, output_hidden_states=True)  # loads a pretrained GPT-2 base model\n            else:\n                print(\"------------------no pretrain------------------\")\n                self.gpt2_trend = GPT2Model(GPT2Config())\n                self.gpt2_season = GPT2Model(GPT2Config())\n                self.gpt2_noise = GPT2Model(GPT2Config())\n            self.gpt2_trend.h = self.gpt2_trend.h[:configs.gpt_layers]\n           \n            self.prompt = configs.prompt\n            # \n            # self.tokenizer = GPT2Tokenizer.from_pretrained(\"gpt2\")\n\n            if not os.path.exists(\"/dev_data/lz/gpt2\"):\n                self.tokenizer = GPT2Tokenizer.from_pretrained('/SSD/lz/gpt2')\n            else:\n                
self.tokenizer = GPT2Tokenizer.from_pretrained('/dev_data/lz/gpt2')\n\n            self.gpt2_trend_token = self.tokenizer(text=\"Predict the future time step given the trend\", return_tensors=\"pt\").to(device)\n            self.gpt2_season_token = self.tokenizer(text=\"Predict the future time step given the season\", return_tensors=\"pt\").to(device)\n            self.gpt2_residual_token = self.tokenizer(text=\"Predict the future time step given the residual\", return_tensors=\"pt\").to(device)\n\n\n            self.token_len = len(self.gpt2_trend_token['input_ids'][0])\n\n            try:\n                self.pool = configs.pool\n                if self.pool:\n                    self.prompt_record_plot = {}\n                    self.prompt_record_id = 0\n                    self.diversify = True\n\n            except:\n                self.pool = False\n\n            if self.pool:\n                self.prompt_key_dict = nn.ParameterDict({})\n                self.prompt_value_dict = nn.ParameterDict({})\n                # self.summary_map = nn.Linear(self.token_len, 1)\n                self.summary_map = nn.Linear(self.patch_num, 1)\n                self.pool_size = 30\n                self.top_k = 3\n                self.prompt_len = 3\n                self.token_len = self.prompt_len * self.top_k\n                for i in range(self.pool_size):\n                    prompt_shape = (self.prompt_len, 768)\n                    key_shape = (768)\n                    self.prompt_value_dict[f\"prompt_value_{i}\"] = nn.Parameter(torch.randn(prompt_shape))\n                    self.prompt_key_dict[f\"prompt_key_{i}\"] = nn.Parameter(torch.randn(key_shape))\n            \n                self.prompt_record = {f\"id_{i}\": 0 for i in range(self.pool_size)}\n                self.prompt_record_trend = {}\n                self.prompt_record_season = {}\n                self.prompt_record_residual = {}\n                self.diversify = True\n\n\n        self.in_layer_trend = 
nn.Linear(configs.patch_size, configs.d_model)\n        self.in_layer_season = nn.Linear(configs.patch_size, configs.d_model)\n        self.in_layer_noise = nn.Linear(configs.patch_size, configs.d_model)\n        # self.out_layer_noise = nn.Linear(configs.d_model * self.patch_num, configs.pred_len)\n\n        if configs.prompt == 1:\n            # print((configs.d_model+9) * self.patch_num)\n            self.use_token = configs.use_token\n            if self.use_token == 1: # if use prompt token's representation as the forecasting's information\n                    self.out_layer_trend = nn.Linear(configs.d_model * (self.patch_num+self.token_len), configs.pred_len)\n                    self.out_layer_season = nn.Linear(configs.d_model * (self.patch_num+self.token_len), configs.pred_len)\n                    self.out_layer_noise = nn.Linear(configs.d_model * (self.patch_num+self.token_len), configs.pred_len)\n            else:\n                self.out_layer_trend = nn.Linear(configs.d_model * self.patch_num, configs.pred_len)\n                self.out_layer_season = nn.Linear(configs.d_model * self.patch_num, configs.pred_len)\n                self.out_layer_noise = nn.Linear(configs.d_model * self.patch_num, configs.pred_len)\n                # self.fre_len = configs.seq_len # // 2 + 1\n                # self.out_layer_noise_fre = ComplexLinear(self.fre_len, configs.pred_len)\n                # self.pred_len = configs.pred_len\n                # self.seq_len = configs.seq_len\n\n\n            self.prompt_layer_trend = nn.Linear(configs.d_model, configs.d_model)\n            self.prompt_layer_season = nn.Linear(configs.d_model, configs.d_model)\n            self.prompt_layer_noise = nn.Linear(configs.d_model, configs.d_model)\n\n            for layer in (self.prompt_layer_trend, self.prompt_layer_season, self.prompt_layer_noise):\n                layer.to(device=device)\n                layer.train()\n        else:\n            self.out_layer_trend = 
nn.Linear(configs.d_model * self.patch_num, configs.pred_len)\n            self.out_layer_season = nn.Linear(configs.d_model * self.patch_num, configs.pred_len)\n            self.out_layer_noise = nn.Linear(configs.d_model * self.patch_num, configs.pred_len)\n\n\n        \n        if configs.freeze and configs.pretrain:\n            for i, (name, param) in enumerate(self.gpt2_trend.named_parameters()):\n                if 'ln' in name or 'wpe' in name:\n                    param.requires_grad = True\n                else:\n                    param.requires_grad = False\n\n        config = LoraConfig(\n            # task_type=TaskType.CAUSAL_LM, # causal language model\n            r=16,\n            lora_alpha=16,\n            # target_modules=[\"query\", \"value\"],\n            lora_dropout=0.1,\n            bias=\"lora_only\",               # bias, set to only lora layers to train\n            # modules_to_save=[\"classifier\"],\n        )\n         \n        self.gpt2_trend = get_peft_model(self.gpt2_trend, config)\n        print_trainable_parameters(self.gpt2_trend)\n\n\n        for layer in (self.gpt2_trend, self.in_layer_trend, self.out_layer_trend, \\\n                      self.in_layer_season, self.out_layer_season, self.in_layer_noise, self.out_layer_noise):\n            layer.to(device=device)\n            layer.train()\n\n        for layer in (self.map_trend, self.map_season, self.map_resid):\n            layer.to(device=device)\n            layer.train()\n        \n        \n        self.cnt = 0\n\n        self.num_nodes = configs.num_nodes\n        self.rev_in_trend = RevIn(num_features=self.num_nodes).to(device)\n        self.rev_in_season = RevIn(num_features=self.num_nodes).to(device)\n        self.rev_in_noise = RevIn(num_features=self.num_nodes).to(device)\n\n        \n\n        \n    def store_tensors_in_dict(self, original_x, original_trend, original_season, original_noise, trend_prompts, season_prompts, noise_prompts):\n        # Assuming 
prompts are lists of tuples       \n        self.prompt_record_id += 1 \n        for i in range(original_x.size(0)):\n            self.prompt_record_plot[self.prompt_record_id + i] = {\n                'original_x': original_x[i].tolist(),\n                'original_trend': original_trend[i].tolist(),\n                'original_season': original_season[i].tolist(),\n                'original_noise': original_noise[i].tolist(),\n                'trend_prompt': trend_prompts[i],\n                'season_prompt': season_prompts[i],\n                'noise_prompt': noise_prompts[i],\n            }\n        \n\n\n    def l2_normalize(self, x, dim=None, epsilon=1e-12):\n        \"\"\"Normalizes a given vector or matrix.\"\"\"\n        square_sum = torch.sum(x ** 2, dim=dim, keepdim=True)\n        x_inv_norm = torch.rsqrt(torch.maximum(square_sum, torch.tensor(epsilon, device=x.device)))\n        return x * x_inv_norm\n\n    def select_prompt(self, summary, prompt_mask=None):\n        prompt_key_matrix = torch.stack(tuple([self.prompt_key_dict[i] for i in self.prompt_key_dict.keys()]))\n        prompt_norm = self.l2_normalize(prompt_key_matrix, dim=1) # Pool_size, C\n        summary_reshaped = summary.view(-1, self.patch_num)\n        summary_mapped = self.summary_map(summary_reshaped)\n        summary = summary_mapped.view(-1, 768)\n        summary_embed_norm = self.l2_normalize(summary, dim=1)\n        similarity = torch.matmul(summary_embed_norm, prompt_norm.t())\n        if not prompt_mask==None:\n            idx = prompt_mask\n        else:\n            topk_sim, idx = torch.topk(similarity, k=self.top_k, dim=1)\n        if prompt_mask==None:\n            count_of_keys = torch.bincount(torch.flatten(idx), minlength=15)\n            for i in range(len(count_of_keys)):\n                self.prompt_record[f\"id_{i}\"] += count_of_keys[i].item()\n\n\n        prompt_value_matrix = torch.stack(tuple([self.prompt_value_dict[i] for i in self.prompt_value_dict.keys()]))\n     
   batched_prompt_raw = prompt_value_matrix[idx].squeeze(1)\n        batch_size, top_k, length, c = batched_prompt_raw.shape # [16, 3, 5, 768]\n        batched_prompt = batched_prompt_raw.reshape(batch_size, top_k * length, c) \n       \n        batched_key_norm = prompt_norm[idx]\n        summary_embed_norm = summary_embed_norm.unsqueeze(1)\n        sim = batched_key_norm * summary_embed_norm\n        reduce_sim = torch.sum(sim) / summary.shape[0]\n\n        # Return the sorted tuple of selected prompts along with batched_prompt and reduce_sim\n        selected_prompts = [tuple(sorted(row)) for row in idx.tolist()]\n        # print(\"reduce_sim: \", reduce_sim)\n\n        return batched_prompt, reduce_sim, selected_prompts\n\n\n    def get_norm(self, x, d = 'norm'):\n        # if d == 'norm':\n        means = x.mean(1, keepdim=True).detach()\n        x = x - means\n        stdev = torch.sqrt(torch.var(x, dim=1, keepdim=True, unbiased=False)+ 1e-5).detach() \n        x /= stdev\n\n        return x, means, stdev\n    \n    def get_patch(self, x):\n        x = rearrange(x, 'b l m -> b m l')\n        x = self.padding_patch_layer(x) # 4, 1, 420\n        x = x.unfold(dimension=-1, size=self.patch_size, step=self.stride) #4,1, 64, 16\n        x = rearrange(x, 'b m n p -> (b m) n p') # 4, 64, 16\n\n        return x\n    \n    def get_emb(self, x, tokens=None, type = 'Trend'):\n        if tokens is None:\n            if type == 'Trend':\n                x = self.gpt2_trend(inputs_embeds =x).last_hidden_state\n            elif type == 'Season':\n                x = self.gpt2_trend(inputs_embeds =x).last_hidden_state\n            elif type == 'Residual':\n                x = self.gpt2_trend(inputs_embeds =x).last_hidden_state\n            return x\n        else:\n            [a,b,c] = x.shape\n          \n            \n            if type == 'Trend': \n                if self.pool:\n                    prompt_x, reduce_sim, selected_prompts_trend = self.select_prompt(x, 
prompt_mask=None)\n                    for selected_prompt_trend in selected_prompts_trend:\n                        self.prompt_record_trend[selected_prompt_trend] = self.prompt_record_trend.get(selected_prompt_trend, 0) + 1\n                    selected_prompts = selected_prompts_trend\n                else:\n                    prompt_x = self.gpt2_trend.wte(tokens)\n                    prompt_x = prompt_x.repeat(a,1,1)\n                    prompt_x = self.prompt_layer_trend(prompt_x)\n                x = torch.cat((prompt_x, x), dim=1)\n                \n\n            elif type == 'Season':\n                if self.pool:\n                    prompt_x, reduce_sim, selected_prompts_season = self.select_prompt(x, prompt_mask=None)\n                    for selected_prompt_season in selected_prompts_season:\n                        self.prompt_record_season[selected_prompt_season] = self.prompt_record_season.get(selected_prompt_season, 0) + 1\n                    selected_prompts = selected_prompts_season\n                else:\n                    prompt_x = self.gpt2_trend.wte(tokens)\n                    prompt_x = prompt_x.repeat(a,1,1)\n                    prompt_x = self.prompt_layer_season(prompt_x)\n                \n                x = torch.cat((prompt_x, x), dim=1)\n                # x = self.gpt2_trend(inputs_embeds =x_all).last_hidden_state\n                \n            elif type == 'Residual':\n                if self.pool:\n                    prompt_x, reduce_sim, selected_prompts_resid = self.select_prompt(x, prompt_mask=None)\n                    for selected_prompt_resid in selected_prompts_resid:\n                        self.prompt_record_residual[selected_prompt_resid] = self.prompt_record_residual.get(selected_prompt_resid, 0) + 1\n                    selected_prompts = selected_prompts_resid\n                else:\n                    prompt_x = self.gpt2_trend.wte(tokens)\n                    prompt_x = prompt_x.repeat(a,1,1)\n              
      prompt_x = self.prompt_layer_noise(prompt_x)\n                # prompt_x, reduce_sim_trend = self.select_prompt(x, prompt_mask=None)\n                \n                x = torch.cat((prompt_x, x), dim=1)\n                \n            if self.pool:\n                return x, reduce_sim, selected_prompts\n            else:\n                return x\n\n\n    def forward(self, x, itr, trend, season, noise, test=False):\n        B, L, M = x.shape # 4, 512, 1\n\n       \n        x = self.rev_in_trend(x, 'norm')\n\n        original_x = x\n        \n        trend_local = self.moving_avg(x)\n        trend_local = self.map_trend(trend_local.squeeze()).unsqueeze(2)\n        season_local = x - trend_local\n        # print(season_local.squeeze().shape)\n        season_local = self.map_season(season_local.squeeze().unsqueeze(1)).squeeze(1).unsqueeze(2)\n        noise_local = x - trend_local - season_local\n\n        \n        trend, means_trend, stdev_trend = self.get_norm(trend)\n        season, means_season, stdev_season = self.get_norm(season)\n        noise, means_noise, stdev_noise = self.get_norm(noise)\n\n        if trend is not None:\n            trend_local_l = criterion(trend, trend_local)\n            season_local_l = criterion(season, season_local)\n            noise_local_l = criterion(noise, noise_local)\n            \n            loss_local = trend_local_l + season_local_l + noise_local_l \n            #import ipdb; ipdb.set_trace()\n            if test:\n                print(\"trend local loss:\", torch.mean(trend_local_l))\n                print(\"Season local loss\", torch.mean(season_local_l))\n                print(\"noise local loss\", torch.mean(noise_local_l))\n\n\n        trend = self.get_patch(trend_local)\n        season = self.get_patch(season_local)\n        noise = self.get_patch(noise_local)\n\n    \n        trend = self.in_layer_trend(trend) # 4, 64, 768\n        if self.is_gpt and self.prompt == 1:\n            if self.pool:\n              
  trend, reduce_sim_trend, trend_selected_prompts = self.get_emb(trend, self.gpt2_trend_token['input_ids'], 'Trend')\n            else:\n                trend = self.get_emb(trend, self.gpt2_trend_token['input_ids'], 'Trend')\n        else:\n            trend = self.get_emb(trend)\n\n        season = self.in_layer_season(season) # 4, 64, 768\n        if self.is_gpt and self.prompt == 1:\n            if self.pool:\n                season, reduce_sim_season, season_selected_prompts = self.get_emb(season, self.gpt2_season_token['input_ids'], 'Season')\n            else:\n                season = self.get_emb(season, self.gpt2_season_token['input_ids'], 'Season')\n        else:\n            season = self.get_emb(season)\n\n        noise = self.in_layer_noise(noise)\n        if self.is_gpt and self.prompt == 1:\n            if self.pool:\n                noise, reduce_sim_noise, noise_selected_prompts = self.get_emb(noise, self.gpt2_residual_token['input_ids'], 'Residual')\n            else:\n                noise = self.get_emb(noise, self.gpt2_residual_token['input_ids'], 'Residual')\n        else:\n            noise = self.get_emb(noise)\n\n        # print(noise_selected_prompts)\n\n        # self.store_tensors_in_dict(original_x, trend_local, season_local, noise_local, trend_selected_prompts, season_selected_prompts, noise_selected_prompts)\n        \n\n        x_all = torch.cat((trend, season, noise), dim=1)\n\n        x = self.gpt2_trend(inputs_embeds =x_all).last_hidden_state \n        \n        if self.prompt == 1:\n            trend  = x[:, :self.token_len+self.patch_num, :]  \n            season  = x[:, self.token_len+self.patch_num:2*self.token_len+2*self.patch_num, :]  \n            noise = x[:, 2*self.token_len+2*self.patch_num:, :]\n            if self.use_token == 0:\n                trend = trend[:, self.token_len:, :]\n                season = season[:, self.token_len:, :]\n                noise = noise[:, self.token_len:, :]    \n        else:\n        
    trend  = x[:, :self.patch_num, :]  \n            season  = x[:, self.patch_num:2*self.patch_num, :]  \n            noise = x[:, 2*self.patch_num:, :] \n            \n        \n        trend = self.out_layer_trend(trend.reshape(B*M, -1)) # 4, 96\n        trend = rearrange(trend, '(b m) l -> b l m', b=B) # 4, 96, 1\n        \n        season = self.out_layer_season(season.reshape(B*M, -1)) # 4, 96\n        # print(season.shape)\n        season = rearrange(season, '(b m) l -> b l m', b=B) # 4, 96, 1\n        # season = season * stdev_season + means_season\n\n        \n        noise = self.out_layer_noise(noise.reshape(B*M, -1)) # 4, 96\n        noise = rearrange(noise, '(b m) l -> b l m', b=B)\n        # noise = noise * stdev_noise + means_noise\n        \n        outputs = trend + season + noise #season #trend # #+ noise\n\n        # outputs = outputs * stdev + means\n        outputs = self.rev_in_trend(outputs, 'denorm')\n        # if self.pool:\n        #     return outputs, loss_local #loss_local - reduce_sim_trend - reduce_sim_season - reduce_sim_noise\n        return outputs, loss_local"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/models/TimesNet.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.fft\nfrom Other_baselines.layers.Embed import DataEmbedding\nfrom Other_baselines.layers.Conv_Blocks import Inception_Block_V1\n\n\ndef FFT_for_Period(x, k=2):\n    # [B, T, C]\n    xf = torch.fft.rfft(x, dim=1)\n    # find period by amplitudes\n    frequency_list = abs(xf).mean(0).mean(-1)\n    frequency_list[0] = 0\n    _, top_list = torch.topk(frequency_list, k)\n    top_list = top_list.detach().cpu().numpy()\n    period = x.shape[1] // top_list\n    return period, abs(xf).mean(-1)[:, top_list]\n\n\nclass TimesBlock(nn.Module):\n    def __init__(self, configs):\n        super(TimesBlock, self).__init__()\n        self.seq_len = configs.seq_len\n        self.pred_len = configs.pred_len\n        self.k = configs.top_k\n        # parameter-efficient design\n        self.conv = nn.Sequential(\n            Inception_Block_V1(configs.d_model, configs.d_ff,\n                               num_kernels=configs.num_kernels),\n            nn.GELU(),\n            Inception_Block_V1(configs.d_ff, configs.d_model,\n                               num_kernels=configs.num_kernels)\n        )\n\n    def forward(self, x):\n        B, T, N = x.size()\n        period_list, period_weight = FFT_for_Period(x, self.k)\n\n        res = []\n        for i in range(self.k):\n            period = period_list[i]\n            # padding\n            if (self.seq_len + self.pred_len) % period != 0:\n                length = (\n                                 ((self.seq_len + self.pred_len) // period) + 1) * period\n                padding = torch.zeros([x.shape[0], (length - (self.seq_len + self.pred_len)), x.shape[2]]).to(x.device)\n                out = torch.cat([x, padding], dim=1)\n            else:\n                length = (self.seq_len + self.pred_len)\n                out = x\n            # reshape\n            out = out.reshape(B, length // period, period,\n                              
N).permute(0, 3, 1, 2).contiguous()\n            # 2D conv: from 1d Variation to 2d Variation\n            out = self.conv(out)\n            # reshape back\n            out = out.permute(0, 2, 3, 1).reshape(B, -1, N)\n            res.append(out[:, :(self.seq_len + self.pred_len), :])\n        res = torch.stack(res, dim=-1)\n        # adaptive aggregation\n        period_weight = F.softmax(period_weight, dim=1)\n        period_weight = period_weight.unsqueeze(\n            1).unsqueeze(1).repeat(1, T, N, 1)\n        res = torch.sum(res * period_weight, -1)\n        # residual connection\n        res = res + x\n        return res\n\n\nclass Model(nn.Module):\n    \"\"\"\n    Paper link: https://openreview.net/pdf?id=ju_Uqw384Oq\n    \"\"\"\n\n    def __init__(self, configs):\n        super(Model, self).__init__()\n        self.configs = configs\n        self.task_name = configs.task_name\n        self.seq_len = configs.seq_len\n        self.label_len = configs.label_len\n        self.pred_len = configs.pred_len\n        self.model = nn.ModuleList([TimesBlock(configs)\n                                    for _ in range(configs.e_layers)])\n        self.enc_embedding = DataEmbedding(configs.enc_in, configs.d_model, configs.embed, configs.freq,\n                                           configs.dropout)\n        self.layer = configs.e_layers\n        self.layer_norm = nn.LayerNorm(configs.d_model)\n        if self.task_name == 'long_term_forecast' or self.task_name == 'short_term_forecast':\n            self.predict_linear = nn.Linear(\n                self.seq_len, self.pred_len + self.seq_len)\n            self.projection = nn.Linear(\n                configs.d_model, configs.c_out, bias=True)\n        if self.task_name == 'imputation' or self.task_name == 'anomaly_detection':\n            self.projection = nn.Linear(\n                configs.d_model, configs.c_out, bias=True)\n        if self.task_name == 'classification':\n            self.act = F.gelu\n            
self.dropout = nn.Dropout(configs.dropout)\n            self.projection = nn.Linear(\n                configs.d_model * configs.seq_len, configs.num_class)\n\n    def forecast(self, x_enc, x_mark_enc, x_dec, x_mark_dec):\n        # Normalization from Non-stationary Transformer\n        means = x_enc.mean(1, keepdim=True).detach()\n        x_enc = x_enc - means\n        stdev = torch.sqrt(\n            torch.var(x_enc, dim=1, keepdim=True, unbiased=False) + 1e-5)\n        x_enc /= stdev\n\n        # embedding\n        enc_out = self.enc_embedding(x_enc, x_mark_enc)  # [B,T,C]\n        enc_out = self.predict_linear(enc_out.permute(0, 2, 1)).permute(\n            0, 2, 1)  # align temporal dimension\n        # TimesNet\n        for i in range(self.layer):\n            enc_out = self.layer_norm(self.model[i](enc_out))\n        # porject back\n        dec_out = self.projection(enc_out)\n\n        # De-Normalization from Non-stationary Transformer\n        dec_out = dec_out * \\\n                  (stdev[:, 0, :].unsqueeze(1).repeat(\n                      1, self.pred_len + self.seq_len, 1))\n        dec_out = dec_out + \\\n                  (means[:, 0, :].unsqueeze(1).repeat(\n                      1, self.pred_len + self.seq_len, 1))\n        return dec_out\n\n    def imputation(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask):\n        # Normalization from Non-stationary Transformer\n        means = torch.sum(x_enc, dim=1) / torch.sum(mask == 1, dim=1)\n        means = means.unsqueeze(1).detach()\n        x_enc = x_enc - means\n        x_enc = x_enc.masked_fill(mask == 0, 0)\n        stdev = torch.sqrt(torch.sum(x_enc * x_enc, dim=1) /\n                           torch.sum(mask == 1, dim=1) + 1e-5)\n        stdev = stdev.unsqueeze(1).detach()\n        x_enc /= stdev\n\n        # embedding\n        enc_out = self.enc_embedding(x_enc, x_mark_enc)  # [B,T,C]\n        # TimesNet\n        for i in range(self.layer):\n            enc_out = 
self.layer_norm(self.model[i](enc_out))\n        # porject back\n        dec_out = self.projection(enc_out)\n\n        # De-Normalization from Non-stationary Transformer\n        dec_out = dec_out * \\\n                  (stdev[:, 0, :].unsqueeze(1).repeat(\n                      1, self.pred_len + self.seq_len, 1))\n        dec_out = dec_out + \\\n                  (means[:, 0, :].unsqueeze(1).repeat(\n                      1, self.pred_len + self.seq_len, 1))\n        return dec_out\n\n    def anomaly_detection(self, x_enc):\n        # Normalization from Non-stationary Transformer\n        means = x_enc.mean(1, keepdim=True).detach()\n        x_enc = x_enc - means\n        stdev = torch.sqrt(\n            torch.var(x_enc, dim=1, keepdim=True, unbiased=False) + 1e-5)\n        x_enc /= stdev\n\n        # embedding\n        enc_out = self.enc_embedding(x_enc, None)  # [B,T,C]\n        # TimesNet\n        for i in range(self.layer):\n            enc_out = self.layer_norm(self.model[i](enc_out))\n        # porject back\n        dec_out = self.projection(enc_out)\n\n        # De-Normalization from Non-stationary Transformer\n        dec_out = dec_out * \\\n                  (stdev[:, 0, :].unsqueeze(1).repeat(\n                      1, self.pred_len + self.seq_len, 1))\n        dec_out = dec_out + \\\n                  (means[:, 0, :].unsqueeze(1).repeat(\n                      1, self.pred_len + self.seq_len, 1))\n        return dec_out\n\n    def classification(self, x_enc, x_mark_enc):\n        # embedding\n        enc_out = self.enc_embedding(x_enc, None)  # [B,T,C]\n        # TimesNet\n        for i in range(self.layer):\n            enc_out = self.layer_norm(self.model[i](enc_out))\n\n        # Output\n        # the output transformer encoder/decoder embeddings don't include non-linearity\n        output = self.act(enc_out)\n        output = self.dropout(output)\n        # zero-out padding embeddings\n        output = output * x_mark_enc.unsqueeze(-1)\n        # 
(batch_size, seq_length * d_model)\n        output = output.reshape(output.shape[0], -1)\n        output = self.projection(output)  # (batch_size, num_classes)\n        return output\n\n    def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask=None):\n        if self.task_name == 'long_term_forecast' or self.task_name == 'short_term_forecast':\n            dec_out = self.forecast(x_enc, x_mark_enc, x_dec, x_mark_dec)\n            return dec_out[:, -self.pred_len:, :]  # [B, L, D]\n        if self.task_name == 'imputation':\n            dec_out = self.imputation(\n                x_enc, x_mark_enc, x_dec, x_mark_dec, mask)\n            return dec_out  # [B, L, D]\n        if self.task_name == 'anomaly_detection':\n            dec_out = self.anomaly_detection(x_enc)\n            return dec_out  # [B, L, D]\n        if self.task_name == 'classification':\n            dec_out = self.classification(x_enc, x_mark_enc)\n            return dec_out  # [B, N]\n        return None\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/models/__init__.py",
    "content": ""
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/models/iTransformer.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom Other_baselines.layers.Transformer_EncDec import Encoder, EncoderLayer\nfrom Other_baselines.layers.SelfAttention_Family import FullAttention, AttentionLayer\nfrom Other_baselines.layers.Embed import DataEmbedding_inverted\nimport numpy as np\n\n\nclass Model(nn.Module):\n    \"\"\"\n    Paper link: https://arxiv.org/abs/2310.06625\n    \"\"\"\n\n    def __init__(self, configs):\n        super(Model, self).__init__()\n        self.task_name = configs.task_name\n        self.seq_len = configs.seq_len\n        self.pred_len = configs.pred_len\n        self.output_attention = configs.output_attention\n        # Embedding\n        self.enc_embedding = DataEmbedding_inverted(configs.seq_len, configs.d_model, configs.embed, configs.freq,\n                                                    configs.dropout)\n        # Encoder\n        self.encoder = Encoder(\n            [\n                EncoderLayer(\n                    AttentionLayer(\n                        FullAttention(False, configs.factor, attention_dropout=configs.dropout,\n                                      output_attention=configs.output_attention), configs.d_model, configs.n_heads),\n                    configs.d_model,\n                    configs.d_ff,\n                    dropout=configs.dropout,\n                    activation=configs.activation\n                ) for l in range(configs.e_layers)\n            ],\n            norm_layer=torch.nn.LayerNorm(configs.d_model)\n        )\n        # Decoder\n        if self.task_name == 'long_term_forecast' or self.task_name == 'short_term_forecast':\n            self.projection = nn.Linear(configs.d_model, configs.pred_len, bias=True)\n        if self.task_name == 'imputation':\n            self.projection = nn.Linear(configs.d_model, configs.seq_len, bias=True)\n        if self.task_name == 'anomaly_detection':\n            self.projection = 
nn.Linear(configs.d_model, configs.seq_len, bias=True)\n        if self.task_name == 'classification':\n            self.act = F.gelu\n            self.dropout = nn.Dropout(configs.dropout)\n            self.projection = nn.Linear(configs.d_model * configs.enc_in, configs.num_class)\n\n    def forecast(self, x_enc, x_mark_enc, x_dec, x_mark_dec):\n        # Normalization from Non-stationary Transformer\n        means = x_enc.mean(1, keepdim=True).detach()\n        x_enc = x_enc - means\n        stdev = torch.sqrt(torch.var(x_enc, dim=1, keepdim=True, unbiased=False) + 1e-5)\n        x_enc /= stdev\n\n        _, _, N = x_enc.shape\n\n        # Embedding\n        enc_out = self.enc_embedding(x_enc, x_mark_enc)\n        enc_out, attns = self.encoder(enc_out, attn_mask=None)\n\n        dec_out = self.projection(enc_out).permute(0, 2, 1)[:, :, :N]\n        # De-Normalization from Non-stationary Transformer\n        dec_out = dec_out * (stdev[:, 0, :].unsqueeze(1).repeat(1, self.pred_len, 1))\n        dec_out = dec_out + (means[:, 0, :].unsqueeze(1).repeat(1, self.pred_len, 1))\n        return dec_out\n\n    def imputation(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask):\n        # Normalization from Non-stationary Transformer\n        means = x_enc.mean(1, keepdim=True).detach()\n        x_enc = x_enc - means\n        stdev = torch.sqrt(torch.var(x_enc, dim=1, keepdim=True, unbiased=False) + 1e-5)\n        x_enc /= stdev\n\n        _, L, N = x_enc.shape\n\n        # Embedding\n        enc_out = self.enc_embedding(x_enc, x_mark_enc)\n        enc_out, attns = self.encoder(enc_out, attn_mask=None)\n\n        dec_out = self.projection(enc_out).permute(0, 2, 1)[:, :, :N]\n        # De-Normalization from Non-stationary Transformer\n        dec_out = dec_out * (stdev[:, 0, :].unsqueeze(1).repeat(1, L, 1))\n        dec_out = dec_out + (means[:, 0, :].unsqueeze(1).repeat(1, L, 1))\n        return dec_out\n\n    def anomaly_detection(self, x_enc):\n        # Normalization from 
Non-stationary Transformer\n        means = x_enc.mean(1, keepdim=True).detach()\n        x_enc = x_enc - means\n        stdev = torch.sqrt(torch.var(x_enc, dim=1, keepdim=True, unbiased=False) + 1e-5)\n        x_enc /= stdev\n\n        _, L, N = x_enc.shape\n\n        # Embedding\n        enc_out = self.enc_embedding(x_enc, None)\n        enc_out, attns = self.encoder(enc_out, attn_mask=None)\n\n        dec_out = self.projection(enc_out).permute(0, 2, 1)[:, :, :N]\n        # De-Normalization from Non-stationary Transformer\n        dec_out = dec_out * (stdev[:, 0, :].unsqueeze(1).repeat(1, L, 1))\n        dec_out = dec_out + (means[:, 0, :].unsqueeze(1).repeat(1, L, 1))\n        return dec_out\n\n    def classification(self, x_enc, x_mark_enc):\n        # Embedding\n        enc_out = self.enc_embedding(x_enc, None)\n        enc_out, attns = self.encoder(enc_out, attn_mask=None)\n\n        # Output\n        output = self.act(enc_out)  # the output transformer encoder/decoder embeddings don't include non-linearity\n        output = self.dropout(output)\n        output = output.reshape(output.shape[0], -1)  # (batch_size, c_in * d_model)\n        output = self.projection(output)  # (batch_size, num_classes)\n        return output\n\n    def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask=None):\n        if self.task_name == 'long_term_forecast' or self.task_name == 'short_term_forecast':\n            dec_out = self.forecast(x_enc, x_mark_enc, x_dec, x_mark_dec)\n            return dec_out[:, -self.pred_len:, :]  # [B, L, D]\n        if self.task_name == 'imputation':\n            dec_out = self.imputation(x_enc, x_mark_enc, x_dec, x_mark_dec, mask)\n            return dec_out  # [B, L, D]\n        if self.task_name == 'anomaly_detection':\n            dec_out = self.anomaly_detection(x_enc)\n            return dec_out  # [B, L, D]\n        if self.task_name == 'classification':\n            dec_out = self.classification(x_enc, x_mark_enc)\n            return 
dec_out  # [B, N]\n        return None\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/train_autoformer.py",
    "content": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\nimport argparse\nimport os\nimport torch\nfrom Other_baselines.exp.exp_long_term_forecasting import Exp_Long_Term_Forecast\nfrom Other_baselines.exp.exp_short_term_forecasting import Exp_Short_Term_Forecast\nfrom utils.print_args import print_args\nimport random\nimport numpy as np\n\nif __name__ == '__main__':\n\n    parser = argparse.ArgumentParser(description='DLinear')  ### DLinear iTransformer TimesNet Informer Autoformer\n\n    # basic config\n    parser.add_argument('--random_seed', type=int, default=42, help='random seed')\n    parser.add_argument('--task_name', type=str, required=False, default='long_term_forecast',\n                        help='task name, options:[long_term_forecast, short_term_forecast, imputation, classification, anomaly_detection]')\n    parser.add_argument('--is_training', type=int, required=False, default=1, help='status')\n    parser.add_argument('--model_id', type=str, required=False, default='test', help='model id')\n    parser.add_argument('--model', type=str, required=False, default='Autoformer',\n                        help='model name, options: [Autoformer, Transformer, TimesNet]')\n\n    # data loader\n    parser.add_argument('--data', type=str, required=False, default='custom', help='dataset type')\n    # parser.add_argument('--root_path', type=str, default='./data/ETT/', help='root path of the data file')\n    parser.add_argument('--root_path', type=str, default='/dev_data/lz/ts_forecasting_methods/ts2vec/datasets',\n                        help='root path of the data file')\n    parser.add_argument('--data_path', type=str, default='traffic.csv', help='data file')\n    parser.add_argument('--features', type=str, default='M',\n                        help='forecasting task, options:[M, S, MS]; M:multivariate predict multivariate, S:univariate predict univariate, 
MS:multivariate predict univariate')\n    parser.add_argument('--target', type=str, default='OT', help='target feature in S or MS task')\n    parser.add_argument('--freq', type=str, default='h',\n                        help='freq for time features encoding, options:[s:secondly, t:minutely, h:hourly, d:daily, b:business days, w:weekly, m:monthly], you can also use more detailed freq like 15min or 3h')\n    parser.add_argument('--checkpoints', type=str, default='./checkpoints/', help='location of model checkpoints')\n\n    # forecasting task\n    parser.add_argument('--seq_len', type=int, default=96, help='input sequence length')\n    parser.add_argument('--label_len', type=int, default=48, help='start token length')\n    parser.add_argument('--pred_len', type=int, default=96, help='prediction sequence length')\n    parser.add_argument('--seasonal_patterns', type=str, default='Monthly', help='subset for M4')\n    parser.add_argument('--inverse', action='store_true', help='inverse output data', default=False)\n\n    # inputation task\n    parser.add_argument('--mask_rate', type=float, default=0.25, help='mask ratio')\n\n    # anomaly detection task\n    parser.add_argument('--anomaly_ratio', type=float, default=0.25, help='prior anomaly ratio (%)')\n\n    # model define\n    parser.add_argument('--expand', type=int, default=2, help='expansion factor for Mamba')\n    parser.add_argument('--d_conv', type=int, default=4, help='conv kernel size for Mamba')\n    parser.add_argument('--top_k', type=int, default=5, help='for TimesBlock')\n    parser.add_argument('--num_kernels', type=int, default=6, help='for Inception')\n    parser.add_argument('--enc_in', type=int, default=7, help='encoder input size')\n    parser.add_argument('--dec_in', type=int, default=7, help='decoder input size')\n    parser.add_argument('--c_out', type=int, default=7, help='output size')\n    parser.add_argument('--d_model', type=int, default=512, help='dimension of model')\n    
parser.add_argument('--n_heads', type=int, default=8, help='num of heads')\n    parser.add_argument('--e_layers', type=int, default=2, help='num of encoder layers')\n    parser.add_argument('--d_layers', type=int, default=1, help='num of decoder layers')\n    parser.add_argument('--d_ff', type=int, default=2048, help='dimension of fcn')\n    parser.add_argument('--moving_avg', type=int, default=25, help='window size of moving average')\n    parser.add_argument('--factor', type=int, default=1, help='attn factor')\n    parser.add_argument('--distil', action='store_false',\n                        help='whether to use distilling in encoder, using this argument means not using distilling',\n                        default=True)\n    parser.add_argument('--dropout', type=float, default=0.1, help='dropout')\n    parser.add_argument('--embed', type=str, default='timeF',\n                        help='time features encoding, options:[timeF, fixed, learned]')\n    parser.add_argument('--activation', type=str, default='gelu', help='activation')\n    parser.add_argument('--output_attention', action='store_true', help='whether to output attention in ecoder')\n    parser.add_argument('--channel_independence', type=int, default=1,\n                        help='0: channel dependence 1: channel independence for FreTS model')\n    parser.add_argument('--decomp_method', type=str, default='moving_avg',\n                        help='method of series decompsition, only support moving_avg or dft_decomp')\n    parser.add_argument('--use_norm', type=int, default=1, help='whether to use normalize; True 1 False 0')\n    parser.add_argument('--down_sampling_layers', type=int, default=0, help='num of down sampling layers')\n    parser.add_argument('--down_sampling_window', type=int, default=1, help='down sampling window size')\n    parser.add_argument('--down_sampling_method', type=str, default=None,\n                        help='down sampling method, only support avg, max, conv')\n    
parser.add_argument('--seg_len', type=int, default=48,\n                        help='the length of segmen-wise iteration of SegRNN')\n\n    # optimization\n    parser.add_argument('--num_workers', type=int, default=10, help='data loader num workers')\n    parser.add_argument('--itr', type=int, default=1, help='experiments times')\n    parser.add_argument('--train_epochs', type=int, default=10, help='train epochs')\n    parser.add_argument('--batch_size', type=int, default=8, help='batch size of train input data')\n    parser.add_argument('--patience', type=int, default=3, help='early stopping patience')\n    parser.add_argument('--learning_rate', type=float, default=0.0001, help='optimizer learning rate')\n    parser.add_argument('--des', type=str, default='test', help='exp description')\n    parser.add_argument('--loss', type=str, default='MSE', help='loss function')\n    parser.add_argument('--lradj', type=str, default='type1', help='adjust learning rate')\n    parser.add_argument('--use_amp', action='store_true', help='use automatic mixed precision training', default=False)\n\n    # GPU\n    parser.add_argument('--use_gpu', type=bool, default=True, help='use gpu')\n    parser.add_argument('--gpu', type=int, default=0, help='gpu')\n    parser.add_argument('--use_multi_gpu', help='use multiple gpus', default=False)  ## action='store_true',\n    parser.add_argument('--devices', type=str, default='0,1,2,3', help='device ids of multile gpus')\n\n    # de-stationary projector params\n    parser.add_argument('--p_hidden_dims', type=int, nargs='+', default=[128, 128],\n                        help='hidden layer dimensions of projector (List)')\n    parser.add_argument('--p_hidden_layers', type=int, default=2, help='number of hidden layers in projector')\n\n    # metrics (dtw)\n    parser.add_argument('--use_dtw', type=bool, default=False,\n                        help='the controller of using dtw metric (dtw is time consuming, not suggested unless necessary)')\n\n    # 
Augmentation\n    parser.add_argument('--augmentation_ratio', type=int, default=0, help=\"How many times to augment\")\n    parser.add_argument('--seed', type=int, default=2, help=\"Randomization seed\")\n    parser.add_argument('--jitter', default=False, action=\"store_true\", help=\"Jitter preset augmentation\")\n    parser.add_argument('--scaling', default=False, action=\"store_true\", help=\"Scaling preset augmentation\")\n    parser.add_argument('--permutation', default=False, action=\"store_true\",\n                        help=\"Equal Length Permutation preset augmentation\")\n    parser.add_argument('--randompermutation', default=False, action=\"store_true\",\n                        help=\"Random Length Permutation preset augmentation\")\n    parser.add_argument('--magwarp', default=False, action=\"store_true\", help=\"Magnitude warp preset augmentation\")\n    parser.add_argument('--timewarp', default=False, action=\"store_true\", help=\"Time warp preset augmentation\")\n    parser.add_argument('--windowslice', default=False, action=\"store_true\", help=\"Window slice preset augmentation\")\n    parser.add_argument('--windowwarp', default=False, action=\"store_true\", help=\"Window warp preset augmentation\")\n    parser.add_argument('--rotation', default=False, action=\"store_true\", help=\"Rotation preset augmentation\")\n    parser.add_argument('--spawner', default=False, action=\"store_true\", help=\"SPAWNER preset augmentation\")\n    parser.add_argument('--dtwwarp', default=False, action=\"store_true\", help=\"DTW warp preset augmentation\")\n    parser.add_argument('--shapedtwwarp', default=False, action=\"store_true\", help=\"Shape DTW warp preset augmentation\")\n    parser.add_argument('--wdba', default=False, action=\"store_true\", help=\"Weighted DBA preset augmentation\")\n    parser.add_argument('--discdtw', default=False, action=\"store_true\",\n                        help=\"Discrimitive DTW warp preset augmentation\")\n    
parser.add_argument('--discsdtw', default=False, action=\"store_true\",\n                        help=\"Discrimitive shapeDTW warp preset augmentation\")\n    parser.add_argument('--extra_tag', type=str, default=\"\", help=\"Anything extra\")\n\n    parser.add_argument('--save_dir', type=str, default='/dev_data/lz/ts_forecasting_methods/result/')\n    parser.add_argument('--save_csv_name', type=str, default='autoformer_forecasting_0804.csv')\n\n    args = parser.parse_args()\n    # args.use_gpu = True if torch.cuda.is_available() and args.use_gpu else False\n    args.use_gpu = True if torch.cuda.is_available() else False\n\n    fix_seed = args.random_seed\n    random.seed(fix_seed)\n    torch.manual_seed(fix_seed)\n    np.random.seed(fix_seed)\n\n    # 检查路径是否存在，如果不存在则赋值为新的路径\n    if not os.path.exists(args.root_path):\n        args.root_path = '/dev_data/lz/ts_forecasting_methods/ts2vec/datasets'\n        args.save_dir = '/dev_data/lz/ts_forecasting_methods/result/'\n\n    print(\"root_path = \", args.root_path)  # 输出检查\n\n    print(torch.cuda.is_available())\n\n    if args.use_gpu and args.use_multi_gpu:\n        args.devices = args.devices.replace(' ', '')\n        device_ids = args.devices.split(',')\n        args.device_ids = [int(id_) for id_ in device_ids]\n        args.gpu = args.device_ids[0]\n\n    print('Args in experiment:')\n    print_args(args)\n\n    from Other_baselines.data_provider.data_factory import data_provider\n\n    train_data, train_loader = data_provider(args, 'train')\n\n    print(\"train_data.shape = \", train_data.data_x.shape)\n    args.enc_in = train_data.data_x.shape[-1]\n    args.dec_in = train_data.data_x.shape[-1]\n    args.c_out = train_data.data_x.shape[-1]\n\n    if args.task_name == 'long_term_forecast':\n        Exp = Exp_Long_Term_Forecast\n    elif args.task_name == 'short_term_forecast':\n        Exp = Exp_Short_Term_Forecast\n    else:\n        Exp = Exp_Long_Term_Forecast\n\n    if args.is_training:\n        for ii in 
range(args.itr):\n            # setting record of experiments\n            exp = Exp(args)  # set experiments\n            setting = '{}_{}_{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_expand{}_dc{}_fc{}_eb{}_dt{}_{}_{}'.format(\n                args.task_name,\n                args.model_id,\n                args.model,\n                args.data,\n                args.features,\n                args.seq_len,\n                args.label_len,\n                args.pred_len,\n                args.d_model,\n                args.n_heads,\n                args.e_layers,\n                args.d_layers,\n                args.d_ff,\n                args.expand,\n                args.d_conv,\n                args.factor,\n                args.embed,\n                args.distil,\n                args.des, ii)\n\n            print('>>>>>>>start training : {}>>>>>>>>>>>>>>>>>>>>>>>>>>'.format(setting))\n            exp.train(setting)\n\n            print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))\n            mae, mse, rmse, mape, mspe = exp.test(setting)\n            # torch.cuda.empty_cache()\n            print(\"Success mae, mse = \", mae, mse)\n\n            end_result = {}\n            end_result['dataset'] = args.data_path\n            end_result['pred_len'] = args.pred_len\n            end_result['random_seed'] = args.random_seed\n            end_result['MSE'] = mse\n            end_result['MAE'] = mae\n\n            import pandas as pd\n\n            # 指定保存路径\n            save_path = args.save_dir + args.save_csv_name\n\n            # 转换字典为 DataFrame\n            df_new = pd.DataFrame([end_result])\n\n            # 检查文件是否存在\n            if os.path.exists(save_path):\n                # 文件存在，读取现有数据\n                df_existing = pd.read_csv(save_path, index_col=0)\n                # 将新数据附加到现有数据框中\n                df_combined = pd.concat([df_existing, df_new], ignore_index=True)\n            else:\n                # 文件不存在，创建新的数据框\n     
           df_combined = df_new\n\n            # 保存 DataFrame 为 CSV 文件\n            df_combined.to_csv(save_path, index=True, index_label=\"id\")\n\n            print(\"Save success!!!\")\n\n\n    else:\n        ii = 0\n        setting = '{}_{}_{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_expand{}_dc{}_fc{}_eb{}_dt{}_{}_{}'.format(\n            args.task_name,\n            args.model_id,\n            args.model,\n            args.data,\n            args.features,\n            args.seq_len,\n            args.label_len,\n            args.pred_len,\n            args.d_model,\n            args.n_heads,\n            args.e_layers,\n            args.d_layers,\n            args.d_ff,\n            args.expand,\n            args.d_conv,\n            args.factor,\n            args.embed,\n            args.distil,\n            args.des, ii)\n\n        exp = Exp(args)  # set experiments\n        print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))\n        exp.test(setting, test=1)\n        # torch.cuda.empty_cache()\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/train_cost.py",
    "content": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\nimport time\nimport datetime\nimport torch\nimport argparse\nimport os\nimport random\nfrom Other_baselines.data_provider.data_factory import data_provider\nimport numpy as np\n# import CoST.tasks as tasks\nimport CoST.datautils as datautils\nfrom CoST.utils import init_dl_program, name_with_datetime, pkl_save\nfrom CoST.tasks import _eval_protocols as eval_protocols\n\n# import methods\nfrom  CoST.cost import CoST\n\n\ndef generate_pred_samples(features, data, pred_len, drop=0):\n    n = data.shape[1]\n    features = features[:, :-pred_len]\n    labels = np.stack([ data[:, i:1+n+i-pred_len] for i in range(pred_len)], axis=2)[:, 1:]\n    features = features[:, drop:]\n    labels = labels[:, drop:]\n    return features.reshape(-1, features.shape[-1]), \\\n            labels.reshape(-1, labels.shape[2]*labels.shape[3])\n\n\ndef cal_metrics(pred, target):\n    return {\n        'MSE': ((pred - target) ** 2).mean(),\n        'MAE': np.abs(pred - target).mean()\n    }\n\n\ndef eval_forecasting(model, train_data, valid_data, test_data, pred_lens, padding):\n    t = time.time()\n\n    train_repr = model.encode(\n        train_data,\n        mode='forecasting',\n        casual=True,\n        sliding_length=1,\n        sliding_padding=padding,\n        batch_size=32\n    )\n\n    valid_repr = model.encode(\n        valid_data,\n        mode='forecasting',\n        casual=True,\n        sliding_length=1,\n        sliding_padding=padding,\n        batch_size=32\n    )\n\n    test_repr = model.encode(\n        test_data,\n        mode='forecasting',\n        casual=True,\n        sliding_length=1,\n        sliding_padding=padding,\n        batch_size=32\n    )\n\n    # train_repr = all_repr[:, train_slice]\n    # valid_repr = all_repr[:, valid_slice]\n    # test_repr = all_repr[:, test_slice]\n\n    # train_data = 
data[:, train_slice, n_covariate_cols:]\n    # valid_data = data[:, valid_slice, n_covariate_cols:]\n    # test_data = data[:, test_slice, n_covariate_cols:]\n\n    encoder_infer_time = time.time() - t\n\n    ours_result = {}\n    lr_train_time = {}\n    lr_infer_time = {}\n    out_log = {}\n    for pred_len in pred_lens:\n        train_features, train_labels = generate_pred_samples(train_repr, train_data, pred_len, drop=padding)\n        valid_features, valid_labels = generate_pred_samples(valid_repr, valid_data, pred_len)\n        test_features, test_labels = generate_pred_samples(test_repr, test_data, pred_len)\n\n        t = time.time()\n        lr = eval_protocols.fit_ridge(train_features, train_labels, valid_features, valid_labels)\n        lr_train_time[pred_len] = time.time() - t\n\n        t = time.time()\n        test_pred = lr.predict(test_features)\n        lr_infer_time[pred_len] = time.time() - t\n\n        ori_shape = test_data.shape[0], -1, pred_len, test_data.shape[2]\n        test_pred = test_pred.reshape(ori_shape)\n        test_labels = test_labels.reshape(ori_shape)\n\n        # if test_data.shape[0] > 1:\n        #     test_pred_inv = scaler.inverse_transform(test_pred.swapaxes(0, 3)).swapaxes(0, 3)\n        #     test_labels_inv = scaler.inverse_transform(test_labels.swapaxes(0, 3)).swapaxes(0, 3)\n        # else:\n        #     test_pred_inv = scaler.inverse_transform(test_pred)\n        #     test_labels_inv = scaler.inverse_transform(test_labels)\n        out_log[pred_len] = {\n            'norm': test_pred,\n            # 'raw': test_pred_inv,\n            'norm_gt': test_labels,\n            # 'raw_gt': test_labels_inv\n        }\n        ours_result[pred_len] = {\n            'norm': cal_metrics(test_pred, test_labels),\n            # 'raw': cal_metrics(test_pred_inv, test_labels_inv)\n        }\n\n    eval_res = {\n        'ours': ours_result,\n        'encoder_infer_time': encoder_infer_time,\n        'lr_train_time': lr_train_time,\n 
       'lr_infer_time': lr_infer_time\n    }\n    return out_log, eval_res\n\n\ndef save_checkpoint_callback(\n        save_every=1,\n        unit='epoch'\n):\n    assert unit in ('epoch', 'iter')\n\n    def callback(model, loss):\n        n = model.n_epochs if unit == 'epoch' else model.n_iters\n        if n % save_every == 0:\n            model.save(f'{run_dir}/model_{n}.pkl')\n\n    return callback\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    # parser.add_argument('dataset', help='The dataset name')\n    parser.add_argument('--random_seed', type=int, default=42, help='random seed')\n    parser.add_argument('--dataset', default='national_illness',\n                        help='The dataset name')  ## 'ETTh1', 'ETTh2', 'electricity'  ETTm1\n    # parser.add_argument('run_name', help='The folder name used to save model, output and evaluation metrics. This can be set to any word')\n    parser.add_argument('--run_name', default='CoST',\n                        help='The folder name used to save model, output and evaluation metrics. This can be set to any word')\n    parser.add_argument('--archive', type=str, required=False, default='forecast_csv',\n                        help='The archive name that the dataset belongs to. This can be set to forecast_csv, or forecast_csv_univar')\n    parser.add_argument('--gpu', type=int, default=1,\n                        help='The gpu no. 
used for training and inference (defaults to 0)')\n    parser.add_argument('--batch-size', type=int, default=8, help='The batch size (defaults to 8)')\n    parser.add_argument('--lr', type=float, default=0.001, help='The learning rate (defaults to 0.001)')\n    parser.add_argument('--repr-dims', type=int, default=320, help='The representation dimension (defaults to 320)')\n    parser.add_argument('--max-train-length', type=int, default=201,\n                        help='For sequence with a length greater than <max_train_length>, it would be cropped into some sequences, each of which has a length less than <max_train_length> (defaults to 3000)')\n    parser.add_argument('--iters', type=int, default=None, help='The number of iterations')\n    parser.add_argument('--epochs', type=int, default=None, help='The number of epochs')\n    parser.add_argument('--save-every', type=int, default=None,\n                        help='Save the checkpoint every <save_every> iterations/epochs')\n    parser.add_argument('--seed', type=int, default=None, help='The random seed')\n    parser.add_argument('--max-threads', type=int, default=8,\n                        help='The maximum allowed number of threads used by this process')\n    # parser.add_argument('--eval', action=\"store_true\", help='Whether to perform evaluation after training')\n    parser.add_argument('--eval', default=True,\n                        help='Whether to perform evaluation after training')  ## action=\"store_true\"\n\n    parser.add_argument('--kernels', type=int, nargs='+', default=[1, 2, 4, 8, 16, 32, 64, 128])\n    parser.add_argument('--alpha', type=float, default=0.0005)\n\n    parser.add_argument('--save_dir', type=str, default='/dev_data/lz/ts_forecasting_methods/result/')\n    parser.add_argument('--save_csv_name', type=str, default='CoST_forecasting_0730.csv')\n\n    parser.add_argument('--task_name', type=str, default='long_term_forecast',\n                        help='task name, 
options:[long_term_forecast, short_term_forecast, imputation, classification, anomaly_detection]')\n    parser.add_argument('--data', type=str, default='custom', help='dataset type')\n    parser.add_argument('--root_path', type=str, default='/SSD/lz/ts_forecasting_methods/ts2vec/datasets',\n                        help='root path of the data file')\n    parser.add_argument('--data_path', type=str, default='national_illness.csv', help='data file')\n    parser.add_argument('--features', type=str, default='M',\n                        help='forecasting task, options:[M, S, MS]; M:multivariate predict multivariate, S:univariate predict univariate, MS:multivariate predict univariate')\n    parser.add_argument('--target', type=str, default='OT', help='target feature in S or MS task')\n    parser.add_argument('--freq', type=str, default='h',\n                        help='freq for time features encoding, options:[s:secondly, t:minutely, h:hourly, d:daily, b:business days, w:weekly, m:monthly], you can also use more detailed freq like 15min or 3h')\n    parser.add_argument('--checkpoints', type=str, default='./checkpoints/', help='location of model checkpoints')\n    parser.add_argument('--embed', type=str, default='timeF',\n                        help='time features encoding, options:[timeF, fixed, learned]')\n    parser.add_argument('--seq_len', type=int, default=96, help='input sequence length')\n    parser.add_argument('--label_len', type=int, default=48, help='start token length')\n    parser.add_argument('--pred_len', type=int, default=96, help='prediction sequence length')\n    parser.add_argument('--seasonal_patterns', type=str, default='Monthly',\n                        help='subset for M4')  ## Hourly Daily Weekly Monthly Quarterly Yearly\n    parser.add_argument('--inverse', action='store_true', help='inverse output data', default=False)\n    parser.add_argument('--augmentation_ratio', type=int, default=0, help=\"How many times to augment\")\n    
parser.add_argument('--num_workers', type=int, default=10, help='data loader num workers')\n\n    args = parser.parse_args()\n\n    fix_seed = args.random_seed\n    random.seed(fix_seed)\n    torch.manual_seed(fix_seed)\n    np.random.seed(fix_seed)\n\n    print(\"Dataset:\", args.dataset)\n    print(\"Arguments:\", str(args))\n\n    # 检查路径是否存在，如果不存在则赋值为新的路径\n    if not os.path.exists(args.save_dir):\n        args.save_dir = '/SSD/lz/ts_forecasting_methods/result/'\n\n    print(\"save_dir = \", args.save_dir)  # 输出检查\n\n    device = init_dl_program(args.gpu, seed=args.seed, max_threads=args.max_threads)\n\n    if args.archive == 'forecast_csv':\n        task_type = 'forecasting'\n        # data, train_slice, valid_slice, test_slice, scaler, pred_lens, n_covariate_cols = datautils.load_forecast_csv(\n        #     args.dataset)\n        # train_data = data[:, train_slice]\n\n        train_data, train_loader = data_provider(args, 'train')\n        vali_data, vali_loader = data_provider(args, 'val')\n        test_data, test_loader = data_provider(args, 'test')\n\n        print(\"dataset name = \", args.data_path)\n\n        print(\"type train_data = \", type(train_data))\n\n        print(\"train_data = \", train_data)\n        print(train_data.data_x.shape, train_data.data_y.shape)\n\n        print(\"train_data = \", train_data)\n        print(vali_data.data_x.shape, vali_data.data_y.shape)\n\n        print(\"train_data = \", train_data)\n        print(test_data.data_x.shape, test_data.data_y.shape)\n\n        new_train_data = train_data.data_x[np.newaxis, :, :]\n        new_vali_data = vali_data.data_x[np.newaxis, :, :]\n        new_test_data = test_data.data_x[np.newaxis, :, :]\n\n        print(\"new_train_data = \", new_train_data.shape, new_vali_data.shape, new_test_data.shape)\n\n\n    elif args.archive == 'forecast_csv_univar':\n        task_type = 'forecasting'\n        data, train_slice, valid_slice, test_slice, scaler, pred_lens, n_covariate_cols = 
datautils.load_forecast_csv(\n            args.dataset, univar=True)\n        train_data = data[:, train_slice]\n    elif args.archive == 'forecast_npy':\n        task_type = 'forecasting'\n        data, train_slice, valid_slice, test_slice, scaler, pred_lens, n_covariate_cols = datautils.load_forecast_npy(\n            args.dataset)\n        train_data = data[:, train_slice]\n    elif args.archive == 'forecast_npy_univar':\n        task_type = 'forecasting'\n        data, train_slice, valid_slice, test_slice, scaler, pred_lens, n_covariate_cols = datautils.load_forecast_npy(\n            args.dataset, univar=True)\n        train_data = data[:, train_slice]\n    else:\n        raise ValueError(f\"Archive type {args.archive} is not supported.\")\n\n    config = dict(\n        batch_size=args.batch_size,\n        lr=args.lr,\n        output_dims=args.repr_dims,\n    )\n\n    if args.save_every is not None:\n        unit = 'epoch' if args.epochs is not None else 'iter'\n        config[f'after_{unit}_callback'] = save_checkpoint_callback(args.save_every, unit)\n\n    run_dir = f\"training/{args.dataset}/{name_with_datetime(args.run_name)}\"\n\n    os.makedirs(run_dir, exist_ok=True)\n\n    t = time.time()\n\n    model = CoST(\n        input_dims=new_train_data.shape[-1],\n        kernels=args.kernels,\n        alpha=args.alpha,\n        max_train_length=args.max_train_length,\n        device=device,\n        **config\n    )\n\n    print(\"train_data.shape = \", new_train_data.shape)\n\n    loss_log = model.fit(\n        new_train_data,\n        n_epochs=args.epochs,\n        n_iters=args.iters,\n        verbose=True\n    )\n    model.save(f'{run_dir}/model.pkl')\n\n    t = time.time() - t\n    print(f\"\\nTraining time: {datetime.timedelta(seconds=t)}\\n\")\n\n    if args.eval:\n        pred_lens = [96, 192, 336, 720]\n        if args.dataset == 'national_illness':\n            pred_lens = [24, 36, 48, 60]\n\n        # out, eval_res = eval_forecasting(model, data, 
train_slice, valid_slice, test_slice, scaler, pred_lens,\n        #                                        n_covariate_cols, args.max_train_length - 1)\n        out, eval_res = eval_forecasting(model, new_train_data, new_vali_data, new_test_data, pred_lens, args.max_train_length - 1)\n        print('Evaluation result:', eval_res)\n        pkl_save(f'{run_dir}/eval_res.pkl', eval_res)\n        pkl_save(f'{run_dir}/out.pkl', out)\n\n        print(\"ts2vec eval_res = \", eval_res)\n\n        end_result = {}\n        end_result['dataset'] = args.dataset\n        end_result['random_seed'] = args.random_seed\n        for _pred in pred_lens:\n            _MSE = str(_pred) + \"_MSE\"\n            end_result[_MSE] = eval_res['ours'][_pred]['norm']['MSE']\n            _MAE = str(_pred) + \"_MAE\"\n            end_result[_MAE] = eval_res['ours'][_pred]['norm']['MAE']\n\n        import pandas as pd\n\n        # 转换字典为 DataFrame\n        # df = pd.DataFrame([eval_res])\n        # 指定保存路径\n        save_path = args.save_dir + args.save_csv_name\n\n        # 转换字典为 DataFrame\n        df_new = pd.DataFrame([end_result])\n\n        # 检查文件是否存在\n        if os.path.exists(save_path):\n            # 文件存在，读取现有数据\n            df_existing = pd.read_csv(save_path, index_col=0)\n            # 将新数据附加到现有数据框中\n            df_combined = pd.concat([df_existing, df_new], ignore_index=True)\n        else:\n            # 文件不存在，创建新的数据框\n            df_combined = df_new\n\n        # 保存 DataFrame 为 CSV 文件\n        df_combined.to_csv(save_path, index=True, index_label=\"id\")\n\n        print(\"Save success!!!\")\n\n    print(\"Finished.\")\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/train_dlinear.py",
    "content": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\nimport argparse\nimport os\nimport torch\nfrom Other_baselines.exp.exp_long_term_forecasting import Exp_Long_Term_Forecast\nfrom Other_baselines.exp.exp_short_term_forecasting import Exp_Short_Term_Forecast\nfrom utils.print_args import print_args\nimport random\nimport numpy as np\n\nif __name__ == '__main__':\n\n    parser = argparse.ArgumentParser(description='DLinear')  ### DLinear iTransformer TimesNet Informer Autoformer\n\n    # basic config\n    parser.add_argument('--random_seed', type=int, default=42, help='random seed')\n    parser.add_argument('--task_name', type=str, required=False, default='long_term_forecast',\n                        help='task name, options:[long_term_forecast, short_term_forecast, imputation, classification, anomaly_detection]')\n    parser.add_argument('--is_training', type=int, required=False, default=1, help='status')\n    parser.add_argument('--model_id', type=str, required=False, default='test', help='model id')\n    parser.add_argument('--model', type=str, required=False, default='DLinear',\n                        help='model name, options: [Autoformer, Transformer, TimesNet]')\n\n    # data loader\n    parser.add_argument('--data', type=str, required=False, default='ETTh1', help='dataset type')\n    # parser.add_argument('--root_path', type=str, default='./data/ETT/', help='root path of the data file')\n    parser.add_argument('--root_path', type=str, default='/SSD/lz/ts_forecasting_methods/ts2vec/datasets',\n                        help='root path of the data file')\n    parser.add_argument('--data_path', type=str, default='ETTh1.csv', help='data file')\n    parser.add_argument('--features', type=str, default='M',\n                        help='forecasting task, options:[M, S, MS]; M:multivariate predict multivariate, S:univariate predict univariate, 
MS:multivariate predict univariate')\n    parser.add_argument('--target', type=str, default='OT', help='target feature in S or MS task')\n    parser.add_argument('--freq', type=str, default='h',\n                        help='freq for time features encoding, options:[s:secondly, t:minutely, h:hourly, d:daily, b:business days, w:weekly, m:monthly], you can also use more detailed freq like 15min or 3h')\n    parser.add_argument('--checkpoints', type=str, default='./checkpoints/', help='location of model checkpoints')\n\n    # forecasting task\n    parser.add_argument('--seq_len', type=int, default=96, help='input sequence length')\n    parser.add_argument('--label_len', type=int, default=48, help='start token length')\n    parser.add_argument('--pred_len', type=int, default=96, help='prediction sequence length')\n    parser.add_argument('--seasonal_patterns', type=str, default='Monthly', help='subset for M4')\n    parser.add_argument('--inverse', action='store_true', help='inverse output data', default=False)\n\n    # inputation task\n    parser.add_argument('--mask_rate', type=float, default=0.25, help='mask ratio')\n\n    # anomaly detection task\n    parser.add_argument('--anomaly_ratio', type=float, default=0.25, help='prior anomaly ratio (%)')\n\n    # model define\n    parser.add_argument('--expand', type=int, default=2, help='expansion factor for Mamba')\n    parser.add_argument('--d_conv', type=int, default=4, help='conv kernel size for Mamba')\n    parser.add_argument('--top_k', type=int, default=5, help='for TimesBlock')\n    parser.add_argument('--num_kernels', type=int, default=6, help='for Inception')\n    parser.add_argument('--enc_in', type=int, default=7, help='encoder input size')\n    parser.add_argument('--dec_in', type=int, default=7, help='decoder input size')\n    parser.add_argument('--c_out', type=int, default=7, help='output size')\n    parser.add_argument('--d_model', type=int, default=512, help='dimension of model')\n    
parser.add_argument('--n_heads', type=int, default=8, help='num of heads')\n    parser.add_argument('--e_layers', type=int, default=2, help='num of encoder layers')\n    parser.add_argument('--d_layers', type=int, default=1, help='num of decoder layers')\n    parser.add_argument('--d_ff', type=int, default=2048, help='dimension of fcn')\n    parser.add_argument('--moving_avg', type=int, default=25, help='window size of moving average')\n    parser.add_argument('--factor', type=int, default=1, help='attn factor')\n    parser.add_argument('--distil', action='store_false',\n                        help='whether to use distilling in encoder, using this argument means not using distilling',\n                        default=True)\n    parser.add_argument('--dropout', type=float, default=0.1, help='dropout')\n    parser.add_argument('--embed', type=str, default='timeF',\n                        help='time features encoding, options:[timeF, fixed, learned]')\n    parser.add_argument('--activation', type=str, default='gelu', help='activation')\n    parser.add_argument('--output_attention', action='store_true', help='whether to output attention in ecoder')\n    parser.add_argument('--channel_independence', type=int, default=1,\n                        help='0: channel dependence 1: channel independence for FreTS model')\n    parser.add_argument('--decomp_method', type=str, default='moving_avg',\n                        help='method of series decompsition, only support moving_avg or dft_decomp')\n    parser.add_argument('--use_norm', type=int, default=1, help='whether to use normalize; True 1 False 0')\n    parser.add_argument('--down_sampling_layers', type=int, default=0, help='num of down sampling layers')\n    parser.add_argument('--down_sampling_window', type=int, default=1, help='down sampling window size')\n    parser.add_argument('--down_sampling_method', type=str, default=None,\n                        help='down sampling method, only support avg, max, conv')\n    
parser.add_argument('--seg_len', type=int, default=48,\n                        help='the length of segmen-wise iteration of SegRNN')\n\n    # optimization\n    parser.add_argument('--num_workers', type=int, default=10, help='data loader num workers')\n    parser.add_argument('--itr', type=int, default=1, help='experiments times')\n    parser.add_argument('--train_epochs', type=int, default=10, help='train epochs')\n    parser.add_argument('--batch_size', type=int, default=128, help='batch size of train input data')\n    parser.add_argument('--patience', type=int, default=3, help='early stopping patience')\n    parser.add_argument('--learning_rate', type=float, default=0.0001, help='optimizer learning rate')\n    parser.add_argument('--des', type=str, default='test', help='exp description')\n    parser.add_argument('--loss', type=str, default='MSE', help='loss function')\n    parser.add_argument('--lradj', type=str, default='type1', help='adjust learning rate')\n    parser.add_argument('--use_amp', action='store_true', help='use automatic mixed precision training', default=False)\n\n    # GPU\n    parser.add_argument('--use_gpu', type=bool, default=True, help='use gpu')\n    parser.add_argument('--gpu', type=int, default=0, help='gpu')\n    parser.add_argument('--use_multi_gpu', help='use multiple gpus', default=False)  ## action='store_true',\n    parser.add_argument('--devices', type=str, default='0,1,2,3', help='device ids of multile gpus')\n\n    # de-stationary projector params\n    parser.add_argument('--p_hidden_dims', type=int, nargs='+', default=[128, 128],\n                        help='hidden layer dimensions of projector (List)')\n    parser.add_argument('--p_hidden_layers', type=int, default=2, help='number of hidden layers in projector')\n\n    # metrics (dtw)\n    parser.add_argument('--use_dtw', type=bool, default=False,\n                        help='the controller of using dtw metric (dtw is time consuming, not suggested unless necessary)')\n\n    
# Augmentation\n    parser.add_argument('--augmentation_ratio', type=int, default=0, help=\"How many times to augment\")\n    parser.add_argument('--seed', type=int, default=2, help=\"Randomization seed\")\n    parser.add_argument('--jitter', default=False, action=\"store_true\", help=\"Jitter preset augmentation\")\n    parser.add_argument('--scaling', default=False, action=\"store_true\", help=\"Scaling preset augmentation\")\n    parser.add_argument('--permutation', default=False, action=\"store_true\",\n                        help=\"Equal Length Permutation preset augmentation\")\n    parser.add_argument('--randompermutation', default=False, action=\"store_true\",\n                        help=\"Random Length Permutation preset augmentation\")\n    parser.add_argument('--magwarp', default=False, action=\"store_true\", help=\"Magnitude warp preset augmentation\")\n    parser.add_argument('--timewarp', default=False, action=\"store_true\", help=\"Time warp preset augmentation\")\n    parser.add_argument('--windowslice', default=False, action=\"store_true\", help=\"Window slice preset augmentation\")\n    parser.add_argument('--windowwarp', default=False, action=\"store_true\", help=\"Window warp preset augmentation\")\n    parser.add_argument('--rotation', default=False, action=\"store_true\", help=\"Rotation preset augmentation\")\n    parser.add_argument('--spawner', default=False, action=\"store_true\", help=\"SPAWNER preset augmentation\")\n    parser.add_argument('--dtwwarp', default=False, action=\"store_true\", help=\"DTW warp preset augmentation\")\n    parser.add_argument('--shapedtwwarp', default=False, action=\"store_true\", help=\"Shape DTW warp preset augmentation\")\n    parser.add_argument('--wdba', default=False, action=\"store_true\", help=\"Weighted DBA preset augmentation\")\n    parser.add_argument('--discdtw', default=False, action=\"store_true\",\n                        help=\"Discrimitive DTW warp preset augmentation\")\n    
parser.add_argument('--discsdtw', default=False, action=\"store_true\",\n                        help=\"Discrimitive shapeDTW warp preset augmentation\")\n    parser.add_argument('--extra_tag', type=str, default=\"\", help=\"Anything extra\")\n\n    parser.add_argument('--save_dir', type=str, default='/SSD/lz/ts_forecasting_methods/result/')\n    parser.add_argument('--save_csv_name', type=str, default='dlinear_forecasting_0729.csv')\n\n    args = parser.parse_args()\n    # args.use_gpu = True if torch.cuda.is_available() and args.use_gpu else False\n    args.use_gpu = True if torch.cuda.is_available() else False\n\n    fix_seed = args.random_seed\n    random.seed(fix_seed)\n    torch.manual_seed(fix_seed)\n    np.random.seed(fix_seed)\n\n    # 检查路径是否存在，如果不存在则赋值为新的路径\n    if not os.path.exists(args.root_path):\n        args.root_path = '/dev_data/lz/ts_forecasting_methods/ts2vec/datasets'\n        args.save_dir = '/dev_data/lz/ts_forecasting_methods/result/'\n\n    print(\"root_path = \", args.root_path)  # 输出检查\n\n    print(torch.cuda.is_available())\n\n    if args.use_gpu and args.use_multi_gpu:\n        args.devices = args.devices.replace(' ', '')\n        device_ids = args.devices.split(',')\n        args.device_ids = [int(id_) for id_ in device_ids]\n        args.gpu = args.device_ids[0]\n\n    print('Args in experiment:')\n    print_args(args)\n\n    if args.task_name == 'long_term_forecast':\n        Exp = Exp_Long_Term_Forecast\n    elif args.task_name == 'short_term_forecast':\n        Exp = Exp_Short_Term_Forecast\n    else:\n        Exp = Exp_Long_Term_Forecast\n\n    if args.is_training:\n        for ii in range(args.itr):\n            # setting record of experiments\n            exp = Exp(args)  # set experiments\n            setting = '{}_{}_{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_expand{}_dc{}_fc{}_eb{}_dt{}_{}_{}'.format(\n                args.task_name,\n                args.model_id,\n                args.model,\n                
args.data,\n                args.features,\n                args.seq_len,\n                args.label_len,\n                args.pred_len,\n                args.d_model,\n                args.n_heads,\n                args.e_layers,\n                args.d_layers,\n                args.d_ff,\n                args.expand,\n                args.d_conv,\n                args.factor,\n                args.embed,\n                args.distil,\n                args.des, ii)\n\n            print('>>>>>>>start training : {}>>>>>>>>>>>>>>>>>>>>>>>>>>'.format(setting))\n            exp.train(setting)\n\n            print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))\n            mae, mse, rmse, mape, mspe = exp.test(setting)\n            # torch.cuda.empty_cache()\n            print(\"Success mae, mse = \", mae, mse)\n\n            end_result = {}\n            end_result['dataset'] = args.data_path\n            end_result['pred_len'] = args.pred_len\n            end_result['random_seed'] = args.random_seed\n            end_result['MSE'] = mse\n            end_result['MAE'] = mae\n\n            import pandas as pd\n\n            # 指定保存路径\n            save_path = args.save_dir + args.save_csv_name\n\n            # 转换字典为 DataFrame\n            df_new = pd.DataFrame([end_result])\n\n            # 检查文件是否存在\n            if os.path.exists(save_path):\n                # 文件存在，读取现有数据\n                df_existing = pd.read_csv(save_path, index_col=0)\n                # 将新数据附加到现有数据框中\n                df_combined = pd.concat([df_existing, df_new], ignore_index=True)\n            else:\n                # 文件不存在，创建新的数据框\n                df_combined = df_new\n\n            # 保存 DataFrame 为 CSV 文件\n            df_combined.to_csv(save_path, index=True, index_label=\"id\")\n\n            print(\"Save success!!!\")\n\n\n    else:\n        ii = 0\n        setting = '{}_{}_{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_expand{}_dc{}_fc{}_eb{}_dt{}_{}_{}'.format(\n     
       args.task_name,\n            args.model_id,\n            args.model,\n            args.data,\n            args.features,\n            args.seq_len,\n            args.label_len,\n            args.pred_len,\n            args.d_model,\n            args.n_heads,\n            args.e_layers,\n            args.d_layers,\n            args.d_ff,\n            args.expand,\n            args.d_conv,\n            args.factor,\n            args.embed,\n            args.distil,\n            args.des, ii)\n\n        exp = Exp(args)  # set experiments\n        print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))\n        exp.test(setting, test=1)\n        # torch.cuda.empty_cache()\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/train_gpt4ts.py",
    "content": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\n\nfrom data_provider.data_factory import data_provider\nfrom Other_baselines.utils.tools import EarlyStopping, adjust_learning_rate, visual, vali, test\nfrom tqdm import tqdm\nfrom Other_baselines.models.PatchTST import PatchTST\nfrom Other_baselines.models.GPT4TS import GPT4TS\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch import optim\n\nimport os\nimport time\n\nimport warnings\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport argparse\nimport random\n\nwarnings.filterwarnings('ignore')\n\n\n\nparser = argparse.ArgumentParser(description='GPT4TS') ## GPT4TS PatchTST\nparser.add_argument('--random_seed', type=int, default=42, help='random seed')\nparser.add_argument('--model_id', type=str, required=False, default='test')\nparser.add_argument('--checkpoints', type=str, default='./checkpoints/')\nparser.add_argument('--task_name', type=str, required=False, default='long_term_forecast',\n                    help='task name, options:[long_term_forecast, short_term_forecast, imputation, classification, anomaly_detection]')\nparser.add_argument('--seasonal_patterns', type=str, default='Monthly', help='subset for M4')\n\n# parser.add_argument('--root_path', type=str, default='./dataset/traffic/')\n# parser.add_argument('--data_path', type=str, default='traffic.csv')\n# data loader\n## ETTh1 ETTh2 ETTm1  ETTm2 electricity traffic weather exchange_rate national_illness\n\nparser.add_argument('--root_path', type=str, default='/dev_data/lz/ts_forecasting_methods/ts2vec/datasets',\n                    help='root path of the data file')\nparser.add_argument('--data_path', type=str, default='traffic.csv', help='data file')\nparser.add_argument('--data', type=str, default='custom')\nparser.add_argument('--features', type=str, default='M')\n# parser.add_argument('--freq', type=int, 
default=1)\nparser.add_argument('--freq', type=str, default='h',\n                    help='freq for time features encoding, options:[s:secondly, t:minutely, h:hourly, d:daily, b:business days, w:weekly, m:monthly], you can also use more detailed freq like 15min or 3h')\n# parser.add_argument('--checkpoints', type=str, default='./checkpoints/', help='location of model checkpoints')\nparser.add_argument('--augmentation_ratio', type=int, default=0, help=\"How many times to augment\")\n\nparser.add_argument('--target', type=str, default='OT')\nparser.add_argument('--embed', type=str, default='timeF')\nparser.add_argument('--percent', type=int, default=10)\n\nparser.add_argument('--seq_len', type=int, default=512)\nparser.add_argument('--pred_len', type=int, default=96)\nparser.add_argument('--label_len', type=int, default=48)\n\nparser.add_argument('--decay_fac', type=float, default=0.75)\nparser.add_argument('--learning_rate', type=float, default=0.0001)\nparser.add_argument('--batch_size', type=int, default=2)\nparser.add_argument('--num_workers', type=int, default=10)\nparser.add_argument('--train_epochs', type=int, default=10)\nparser.add_argument('--lradj', type=str, default='type1')\nparser.add_argument('--patience', type=int, default=3)\n\nparser.add_argument('--gpt_layers', type=int, default=3)\nparser.add_argument('--is_gpt', type=int, default=1)\nparser.add_argument('--e_layers', type=int, default=3)\nparser.add_argument('--d_model', type=int, default=768)\nparser.add_argument('--n_heads', type=int, default=16)\nparser.add_argument('--d_ff', type=int, default=512)\nparser.add_argument('--dropout', type=float, default=0.2)\nparser.add_argument('--enc_in', type=int, default=862)\nparser.add_argument('--c_out', type=int, default=862)\nparser.add_argument('--patch_size', type=int, default=16)\nparser.add_argument('--kernel_size', type=int, default=25)\n\nparser.add_argument('--loss_func', type=str, default='mse')\nparser.add_argument('--pretrain', type=int, 
default=1)\nparser.add_argument('--freeze', type=int, default=1)\nparser.add_argument('--model', type=str, default='PatchTST')  ### PatchTST   model\nparser.add_argument('--stride', type=int, default=8)\nparser.add_argument('--max_len', type=int, default=-1)\nparser.add_argument('--hid_dim', type=int, default=16)\nparser.add_argument('--tmax', type=int, default=10)\n\nparser.add_argument('--itr', type=int, default=1)\nparser.add_argument('--cos', type=int, default=0)\n\nparser.add_argument('--save_dir', type=str, default='/dev_data/lz/ts_forecasting_methods/result/')\nparser.add_argument('--save_csv_name', type=str, default='gpt4ts_forecasting_0730.csv')\n\nargs = parser.parse_args()\n\nfix_seed = args.random_seed\nrandom.seed(fix_seed)\ntorch.manual_seed(fix_seed)\nnp.random.seed(fix_seed)\n\nSEASONALITY_MAP = {\n    \"minutely\": 1440,\n    \"10_minutes\": 144,\n    \"half_hourly\": 48,\n    \"hourly\": 24,\n    \"daily\": 7,\n    \"weekly\": 1,\n    \"monthly\": 12,\n    \"quarterly\": 4,\n    \"yearly\": 1\n}\n\nmses = []\nmaes = []\n\nfor ii in range(args.itr):\n\n    setting = '{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_gl{}_df{}_eb{}_itr{}'.format(args.model_id, 336, args.label_len,\n                                                                             args.pred_len,\n                                                                             args.d_model, args.n_heads, args.e_layers,\n                                                                             args.gpt_layers,\n                                                                             args.d_ff, args.embed, ii)\n    path = os.path.join(args.checkpoints, setting)\n    if not os.path.exists(path):\n        os.makedirs(path)\n\n    if args.freq == 0:\n        args.freq = 'h'\n\n    train_data, train_loader = data_provider(args, 'train')\n    vali_data, vali_loader = data_provider(args, 'val')\n    test_data, test_loader = data_provider(args, 'test')\n\n    if args.freq != 'h':\n        
args.freq = SEASONALITY_MAP[test_data.freq]\n        print(\"freq = {}\".format(args.freq))\n\n    device = torch.device('cuda:0')\n\n    time_now = time.time()\n    train_steps = len(train_loader)\n\n    if args.model == 'PatchTST':\n        model = PatchTST(args, device)\n        model.to(device)\n    else:\n        model = GPT4TS(args, device)\n    # mse, mae = test(model, test_data, test_loader, args, device, ii)\n\n    params = model.parameters()\n    model_optim = torch.optim.Adam(params, lr=args.learning_rate)\n\n    early_stopping = EarlyStopping(patience=args.patience, verbose=True)\n    if args.loss_func == 'mse':\n        criterion = nn.MSELoss()\n    elif args.loss_func == 'smape':\n        class SMAPE(nn.Module):\n            def __init__(self):\n                super(SMAPE, self).__init__()\n\n            def forward(self, pred, true):\n                return torch.mean(200 * torch.abs(pred - true) / (torch.abs(pred) + torch.abs(true) + 1e-8))\n\n\n        criterion = SMAPE()\n\n    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(model_optim, T_max=args.tmax, eta_min=1e-8)\n\n    for epoch in range(args.train_epochs):\n\n        iter_count = 0\n        train_loss = []\n        epoch_time = time.time()\n        for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in tqdm(enumerate(train_loader)):\n\n            iter_count += 1\n            model_optim.zero_grad()\n            batch_x = batch_x.float().to(device)\n\n            batch_y = batch_y.float().to(device)\n            batch_x_mark = batch_x_mark.float().to(device)\n            batch_y_mark = batch_y_mark.float().to(device)\n\n            outputs = model(batch_x, ii)\n\n            outputs = outputs[:, -args.pred_len:, :]\n            batch_y = batch_y[:, -args.pred_len:, :].to(device)\n            loss = criterion(outputs, batch_y)\n            train_loss.append(loss.item())\n\n            if (i + 1) % 1000 == 0:\n                print(\"\\titers: {0}, epoch: {1} | loss: 
{2:.7f}\".format(i + 1, epoch + 1, loss.item()))\n                speed = (time.time() - time_now) / iter_count\n                left_time = speed * ((args.train_epochs - epoch) * train_steps - i)\n                print('\\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(speed, left_time))\n                iter_count = 0\n                time_now = time.time()\n            loss.backward()\n            model_optim.step()\n\n        print(\"Epoch: {} cost time: {}\".format(epoch + 1, time.time() - epoch_time))\n\n        train_loss = np.average(train_loss)\n        vali_loss = vali(model, vali_data, vali_loader, criterion, args, device, ii)\n        # test_loss = vali(model, test_data, test_loader, criterion, args, device, ii)\n        # print(\"Epoch: {0}, Steps: {1} | Train Loss: {2:.7f} Vali Loss: {3:.7f}, Test Loss: {4:.7f}\".format(\n        #     epoch + 1, train_steps, train_loss, vali_loss, test_loss))\n        print(\"Epoch: {0}, Steps: {1} | Train Loss: {2:.7f} Vali Loss: {3:.7f}\".format(\n            epoch + 1, train_steps, train_loss, vali_loss))\n\n        if args.cos:\n            scheduler.step()\n            print(\"lr = {:.10f}\".format(model_optim.param_groups[0]['lr']))\n        else:\n            adjust_learning_rate(model_optim, epoch + 1, args)\n        early_stopping(vali_loss, model, path)\n        if early_stopping.early_stop:\n            print(\"Early stopping\")\n            break\n\n    best_model_path = path + '/' + 'checkpoint.pth'\n    model.load_state_dict(torch.load(best_model_path))\n    print(\"------------------------------------\")\n    mse, mae = test(model, test_data, test_loader, args, device, ii)\n    mses.append(mse)\n    maes.append(mae)\n\n    end_result = {}\n    end_result['dataset'] = args.data_path\n    end_result['pred_len'] = args.pred_len\n    end_result['random_seed'] = args.random_seed\n    end_result['MSE'] = mse\n    end_result['MAE'] = mae\n\n    import pandas as pd\n\n    # 指定保存路径\n    save_path = 
args.save_dir + args.save_csv_name\n\n    # 转换字典为 DataFrame\n    df_new = pd.DataFrame([end_result])\n\n    # 检查文件是否存在\n    if os.path.exists(save_path):\n        # 文件存在，读取现有数据\n        df_existing = pd.read_csv(save_path, index_col=0)\n        # 将新数据附加到现有数据框中\n        df_combined = pd.concat([df_existing, df_new], ignore_index=True)\n    else:\n        # 文件不存在，创建新的数据框\n        df_combined = df_new\n\n    # 保存 DataFrame 为 CSV 文件\n    df_combined.to_csv(save_path, index=True, index_label=\"id\")\n\n    print(\"Save success!!!\")\n\nmses = np.array(mses)\nmaes = np.array(maes)\nprint(\"mse_mean = {:.4f}, mse_std = {:.4f}\".format(np.mean(mses), np.std(mses)))\nprint(\"mae_mean = {:.4f}, mae_std = {:.4f}\".format(np.mean(maes), np.std(maes)))"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/train_informer.py",
    "content": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\nimport argparse\nimport os\nimport torch\nfrom Other_baselines.exp.exp_long_term_forecasting import Exp_Long_Term_Forecast\nfrom Other_baselines.exp.exp_short_term_forecasting import Exp_Short_Term_Forecast\nfrom utils.print_args import print_args\nimport random\nimport numpy as np\n\nif __name__ == '__main__':\n\n    parser = argparse.ArgumentParser(description='DLinear')  ### DLinear iTransformer TimesNet Informer Autoformer\n\n    # basic config\n    parser.add_argument('--random_seed', type=int, default=42, help='random seed')\n    parser.add_argument('--task_name', type=str, required=False, default='long_term_forecast',\n                        help='task name, options:[long_term_forecast, short_term_forecast, imputation, classification, anomaly_detection]')\n    parser.add_argument('--is_training', type=int, required=False, default=1, help='status')\n    parser.add_argument('--model_id', type=str, required=False, default='test', help='model id')\n    parser.add_argument('--model', type=str, required=False, default='Informer',\n                        help='model name, options: [Autoformer, Transformer, TimesNet]')\n\n    # data loader\n    parser.add_argument('--data', type=str, required=False, default='custom', help='dataset type')\n    # parser.add_argument('--root_path', type=str, default='./data/ETT/', help='root path of the data file')\n    parser.add_argument('--root_path', type=str, default='/dev_data/lz/ts_forecasting_methods/ts2vec/datasets',\n                        help='root path of the data file')\n    parser.add_argument('--data_path', type=str, default='traffic.csv', help='data file')\n    parser.add_argument('--features', type=str, default='M',\n                        help='forecasting task, options:[M, S, MS]; M:multivariate predict multivariate, S:univariate predict univariate, 
MS:multivariate predict univariate')\n    parser.add_argument('--target', type=str, default='OT', help='target feature in S or MS task')\n    parser.add_argument('--freq', type=str, default='h',\n                        help='freq for time features encoding, options:[s:secondly, t:minutely, h:hourly, d:daily, b:business days, w:weekly, m:monthly], you can also use more detailed freq like 15min or 3h')\n    parser.add_argument('--checkpoints', type=str, default='./checkpoints/', help='location of model checkpoints')\n\n    # forecasting task\n    parser.add_argument('--seq_len', type=int, default=96, help='input sequence length')\n    parser.add_argument('--label_len', type=int, default=48, help='start token length')\n    parser.add_argument('--pred_len', type=int, default=96, help='prediction sequence length')\n    parser.add_argument('--seasonal_patterns', type=str, default='Monthly', help='subset for M4')\n    parser.add_argument('--inverse', action='store_true', help='inverse output data', default=False)\n\n    # inputation task\n    parser.add_argument('--mask_rate', type=float, default=0.25, help='mask ratio')\n\n    # anomaly detection task\n    parser.add_argument('--anomaly_ratio', type=float, default=0.25, help='prior anomaly ratio (%)')\n\n    # model define\n    parser.add_argument('--expand', type=int, default=2, help='expansion factor for Mamba')\n    parser.add_argument('--d_conv', type=int, default=4, help='conv kernel size for Mamba')\n    parser.add_argument('--top_k', type=int, default=5, help='for TimesBlock')\n    parser.add_argument('--num_kernels', type=int, default=6, help='for Inception')\n    parser.add_argument('--enc_in', type=int, default=7, help='encoder input size')\n    parser.add_argument('--dec_in', type=int, default=7, help='decoder input size')\n    parser.add_argument('--c_out', type=int, default=7, help='output size')\n    parser.add_argument('--d_model', type=int, default=512, help='dimension of model')\n    
parser.add_argument('--n_heads', type=int, default=8, help='num of heads')\n    parser.add_argument('--e_layers', type=int, default=2, help='num of encoder layers')\n    parser.add_argument('--d_layers', type=int, default=1, help='num of decoder layers')\n    parser.add_argument('--d_ff', type=int, default=2048, help='dimension of fcn')\n    parser.add_argument('--moving_avg', type=int, default=25, help='window size of moving average')\n    parser.add_argument('--factor', type=int, default=1, help='attn factor')\n    parser.add_argument('--distil', action='store_false',\n                        help='whether to use distilling in encoder, using this argument means not using distilling',\n                        default=True)\n    parser.add_argument('--dropout', type=float, default=0.1, help='dropout')\n    parser.add_argument('--embed', type=str, default='timeF',\n                        help='time features encoding, options:[timeF, fixed, learned]')\n    parser.add_argument('--activation', type=str, default='gelu', help='activation')\n    parser.add_argument('--output_attention', action='store_true', help='whether to output attention in ecoder')\n    parser.add_argument('--channel_independence', type=int, default=1,\n                        help='0: channel dependence 1: channel independence for FreTS model')\n    parser.add_argument('--decomp_method', type=str, default='moving_avg',\n                        help='method of series decompsition, only support moving_avg or dft_decomp')\n    parser.add_argument('--use_norm', type=int, default=1, help='whether to use normalize; True 1 False 0')\n    parser.add_argument('--down_sampling_layers', type=int, default=0, help='num of down sampling layers')\n    parser.add_argument('--down_sampling_window', type=int, default=1, help='down sampling window size')\n    parser.add_argument('--down_sampling_method', type=str, default=None,\n                        help='down sampling method, only support avg, max, conv')\n    
parser.add_argument('--seg_len', type=int, default=48,\n                        help='the length of segmen-wise iteration of SegRNN')\n\n    # optimization\n    parser.add_argument('--num_workers', type=int, default=10, help='data loader num workers')\n    parser.add_argument('--itr', type=int, default=1, help='experiments times')\n    parser.add_argument('--train_epochs', type=int, default=10, help='train epochs')\n    parser.add_argument('--batch_size', type=int, default=8, help='batch size of train input data')\n    parser.add_argument('--patience', type=int, default=3, help='early stopping patience')\n    parser.add_argument('--learning_rate', type=float, default=0.0001, help='optimizer learning rate')\n    parser.add_argument('--des', type=str, default='test', help='exp description')\n    parser.add_argument('--loss', type=str, default='MSE', help='loss function')\n    parser.add_argument('--lradj', type=str, default='type1', help='adjust learning rate')\n    parser.add_argument('--use_amp', action='store_true', help='use automatic mixed precision training', default=False)\n\n    # GPU\n    parser.add_argument('--use_gpu', type=bool, default=True, help='use gpu')\n    parser.add_argument('--gpu', type=int, default=0, help='gpu')\n    parser.add_argument('--use_multi_gpu', help='use multiple gpus', default=False)  ## action='store_true',\n    parser.add_argument('--devices', type=str, default='0,1,2,3', help='device ids of multile gpus')\n\n    # de-stationary projector params\n    parser.add_argument('--p_hidden_dims', type=int, nargs='+', default=[128, 128],\n                        help='hidden layer dimensions of projector (List)')\n    parser.add_argument('--p_hidden_layers', type=int, default=2, help='number of hidden layers in projector')\n\n    # metrics (dtw)\n    parser.add_argument('--use_dtw', type=bool, default=False,\n                        help='the controller of using dtw metric (dtw is time consuming, not suggested unless necessary)')\n\n    # 
Augmentation\n    parser.add_argument('--augmentation_ratio', type=int, default=0, help=\"How many times to augment\")\n    parser.add_argument('--seed', type=int, default=2, help=\"Randomization seed\")\n    parser.add_argument('--jitter', default=False, action=\"store_true\", help=\"Jitter preset augmentation\")\n    parser.add_argument('--scaling', default=False, action=\"store_true\", help=\"Scaling preset augmentation\")\n    parser.add_argument('--permutation', default=False, action=\"store_true\",\n                        help=\"Equal Length Permutation preset augmentation\")\n    parser.add_argument('--randompermutation', default=False, action=\"store_true\",\n                        help=\"Random Length Permutation preset augmentation\")\n    parser.add_argument('--magwarp', default=False, action=\"store_true\", help=\"Magnitude warp preset augmentation\")\n    parser.add_argument('--timewarp', default=False, action=\"store_true\", help=\"Time warp preset augmentation\")\n    parser.add_argument('--windowslice', default=False, action=\"store_true\", help=\"Window slice preset augmentation\")\n    parser.add_argument('--windowwarp', default=False, action=\"store_true\", help=\"Window warp preset augmentation\")\n    parser.add_argument('--rotation', default=False, action=\"store_true\", help=\"Rotation preset augmentation\")\n    parser.add_argument('--spawner', default=False, action=\"store_true\", help=\"SPAWNER preset augmentation\")\n    parser.add_argument('--dtwwarp', default=False, action=\"store_true\", help=\"DTW warp preset augmentation\")\n    parser.add_argument('--shapedtwwarp', default=False, action=\"store_true\", help=\"Shape DTW warp preset augmentation\")\n    parser.add_argument('--wdba', default=False, action=\"store_true\", help=\"Weighted DBA preset augmentation\")\n    parser.add_argument('--discdtw', default=False, action=\"store_true\",\n                        help=\"Discrimitive DTW warp preset augmentation\")\n    
parser.add_argument('--discsdtw', default=False, action=\"store_true\",\n                        help=\"Discrimitive shapeDTW warp preset augmentation\")\n    parser.add_argument('--extra_tag', type=str, default=\"\", help=\"Anything extra\")\n\n    parser.add_argument('--save_dir', type=str, default='/dev_data/lz/ts_forecasting_methods/result/')\n    parser.add_argument('--save_csv_name', type=str, default='informer_forecasting_0729.csv')\n\n    args = parser.parse_args()\n    # args.use_gpu = True if torch.cuda.is_available() and args.use_gpu else False\n    args.use_gpu = True if torch.cuda.is_available() else False\n\n    fix_seed = args.random_seed\n    random.seed(fix_seed)\n    torch.manual_seed(fix_seed)\n    np.random.seed(fix_seed)\n\n    # 检查路径是否存在，如果不存在则赋值为新的路径\n    if not os.path.exists(args.root_path):\n        args.root_path = '/dev_data/lz/ts_forecasting_methods/ts2vec/datasets'\n        args.save_dir = '/dev_data/lz/ts_forecasting_methods/result/'\n\n    print(\"root_path = \", args.root_path)  # 输出检查\n\n    print(torch.cuda.is_available())\n\n\n\n    if args.use_gpu and args.use_multi_gpu:\n        args.devices = args.devices.replace(' ', '')\n        device_ids = args.devices.split(',')\n        args.device_ids = [int(id_) for id_ in device_ids]\n        args.gpu = args.device_ids[0]\n\n    print('Args in experiment:')\n    print_args(args)\n\n    from Other_baselines.data_provider.data_factory import data_provider\n\n    train_data, train_loader = data_provider(args, 'train')\n\n    print(\"train_data.shape = \", train_data.data_x.shape)\n    args.enc_in = train_data.data_x.shape[-1]\n    args.dec_in = train_data.data_x.shape[-1]\n    args.c_out = train_data.data_x.shape[-1]\n\n    if args.task_name == 'long_term_forecast':\n        Exp = Exp_Long_Term_Forecast\n    elif args.task_name == 'short_term_forecast':\n        Exp = Exp_Short_Term_Forecast\n    else:\n        Exp = Exp_Long_Term_Forecast\n\n    if args.is_training:\n        for ii in 
range(args.itr):\n            # setting record of experiments\n            exp = Exp(args)  # set experiments\n            setting = '{}_{}_{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_expand{}_dc{}_fc{}_eb{}_dt{}_{}_{}'.format(\n                args.task_name,\n                args.model_id,\n                args.model,\n                args.data,\n                args.features,\n                args.seq_len,\n                args.label_len,\n                args.pred_len,\n                args.d_model,\n                args.n_heads,\n                args.e_layers,\n                args.d_layers,\n                args.d_ff,\n                args.expand,\n                args.d_conv,\n                args.factor,\n                args.embed,\n                args.distil,\n                args.des, ii)\n\n            print('>>>>>>>start training : {}>>>>>>>>>>>>>>>>>>>>>>>>>>'.format(setting))\n            exp.train(setting)\n\n            print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))\n            mae, mse, rmse, mape, mspe = exp.test(setting)\n            # torch.cuda.empty_cache()\n            print(\"Success mae, mse = \", mae, mse)\n\n            end_result = {}\n            end_result['dataset'] = args.data_path\n            end_result['pred_len'] = args.pred_len\n            end_result['random_seed'] = args.random_seed\n            end_result['MSE'] = mse\n            end_result['MAE'] = mae\n\n            import pandas as pd\n\n            # 指定保存路径\n            save_path = args.save_dir + args.save_csv_name\n\n            # 转换字典为 DataFrame\n            df_new = pd.DataFrame([end_result])\n\n            # 检查文件是否存在\n            if os.path.exists(save_path):\n                # 文件存在，读取现有数据\n                df_existing = pd.read_csv(save_path, index_col=0)\n                # 将新数据附加到现有数据框中\n                df_combined = pd.concat([df_existing, df_new], ignore_index=True)\n            else:\n                # 文件不存在，创建新的数据框\n     
           df_combined = df_new\n\n            # 保存 DataFrame 为 CSV 文件\n            df_combined.to_csv(save_path, index=True, index_label=\"id\")\n\n            print(\"Save success!!!\")\n\n\n    else:\n        ii = 0\n        setting = '{}_{}_{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_expand{}_dc{}_fc{}_eb{}_dt{}_{}_{}'.format(\n            args.task_name,\n            args.model_id,\n            args.model,\n            args.data,\n            args.features,\n            args.seq_len,\n            args.label_len,\n            args.pred_len,\n            args.d_model,\n            args.n_heads,\n            args.e_layers,\n            args.d_layers,\n            args.d_ff,\n            args.expand,\n            args.d_conv,\n            args.factor,\n            args.embed,\n            args.distil,\n            args.des, ii)\n\n        exp = Exp(args)  # set experiments\n        print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))\n        exp.test(setting, test=1)\n        # torch.cuda.empty_cache()\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/train_itransformer.py",
    "content": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\nimport argparse\nimport os\nimport torch\nfrom Other_baselines.exp.exp_long_term_forecasting import Exp_Long_Term_Forecast\nfrom Other_baselines.exp.exp_short_term_forecasting import Exp_Short_Term_Forecast\nfrom utils.print_args import print_args\nimport random\nimport numpy as np\n\nif __name__ == '__main__':\n\n    parser = argparse.ArgumentParser(description='DLinear')  ### DLinear iTransformer TimesNet Informer Autoformer\n\n    # basic config\n    parser.add_argument('--random_seed', type=int, default=42, help='random seed')\n    parser.add_argument('--task_name', type=str, required=False, default='long_term_forecast',\n                        help='task name, options:[long_term_forecast, short_term_forecast, imputation, classification, anomaly_detection]')\n    parser.add_argument('--is_training', type=int, required=False, default=1, help='status')\n    parser.add_argument('--model_id', type=str, required=False, default='test', help='model id')\n    parser.add_argument('--model', type=str, required=False, default='iTransformer',\n                        help='model name, options: [Autoformer, Transformer, TimesNet]')\n\n    # data loader\n    parser.add_argument('--data', type=str, required=False, default='ETTh1', help='dataset type')\n    # parser.add_argument('--root_path', type=str, default='./data/ETT/', help='root path of the data file')\n    parser.add_argument('--root_path', type=str, default='/SSD/lz/ts_forecasting_methods/ts2vec/datasets',\n                        help='root path of the data file')\n    parser.add_argument('--data_path', type=str, default='ETTh1.csv', help='data file')\n    parser.add_argument('--features', type=str, default='M',\n                        help='forecasting task, options:[M, S, MS]; M:multivariate predict multivariate, S:univariate predict univariate, 
MS:multivariate predict univariate')\n    parser.add_argument('--target', type=str, default='OT', help='target feature in S or MS task')\n    parser.add_argument('--freq', type=str, default='h',\n                        help='freq for time features encoding, options:[s:secondly, t:minutely, h:hourly, d:daily, b:business days, w:weekly, m:monthly], you can also use more detailed freq like 15min or 3h')\n    parser.add_argument('--checkpoints', type=str, default='./checkpoints/', help='location of model checkpoints')\n\n    # forecasting task\n    parser.add_argument('--seq_len', type=int, default=96, help='input sequence length')\n    parser.add_argument('--label_len', type=int, default=48, help='start token length')\n    parser.add_argument('--pred_len', type=int, default=96, help='prediction sequence length')\n    parser.add_argument('--seasonal_patterns', type=str, default='Monthly', help='subset for M4')\n    parser.add_argument('--inverse', action='store_true', help='inverse output data', default=False)\n\n    # inputation task\n    parser.add_argument('--mask_rate', type=float, default=0.25, help='mask ratio')\n\n    # anomaly detection task\n    parser.add_argument('--anomaly_ratio', type=float, default=0.25, help='prior anomaly ratio (%)')\n\n    # model define\n    parser.add_argument('--expand', type=int, default=2, help='expansion factor for Mamba')\n    parser.add_argument('--d_conv', type=int, default=4, help='conv kernel size for Mamba')\n    parser.add_argument('--top_k', type=int, default=5, help='for TimesBlock')\n    parser.add_argument('--num_kernels', type=int, default=6, help='for Inception')\n    parser.add_argument('--enc_in', type=int, default=7, help='encoder input size')\n    parser.add_argument('--dec_in', type=int, default=7, help='decoder input size')\n    parser.add_argument('--c_out', type=int, default=7, help='output size')\n    parser.add_argument('--d_model', type=int, default=512, help='dimension of model')\n    
parser.add_argument('--n_heads', type=int, default=8, help='num of heads')\n    parser.add_argument('--e_layers', type=int, default=2, help='num of encoder layers')\n    parser.add_argument('--d_layers', type=int, default=1, help='num of decoder layers')\n    parser.add_argument('--d_ff', type=int, default=2048, help='dimension of fcn')\n    parser.add_argument('--moving_avg', type=int, default=25, help='window size of moving average')\n    parser.add_argument('--factor', type=int, default=1, help='attn factor')\n    parser.add_argument('--distil', action='store_false',\n                        help='whether to use distilling in encoder, using this argument means not using distilling',\n                        default=True)\n    parser.add_argument('--dropout', type=float, default=0.1, help='dropout')\n    parser.add_argument('--embed', type=str, default='timeF',\n                        help='time features encoding, options:[timeF, fixed, learned]')\n    parser.add_argument('--activation', type=str, default='gelu', help='activation')\n    parser.add_argument('--output_attention', action='store_true', help='whether to output attention in ecoder')\n    parser.add_argument('--channel_independence', type=int, default=1,\n                        help='0: channel dependence 1: channel independence for FreTS model')\n    parser.add_argument('--decomp_method', type=str, default='moving_avg',\n                        help='method of series decompsition, only support moving_avg or dft_decomp')\n    parser.add_argument('--use_norm', type=int, default=1, help='whether to use normalize; True 1 False 0')\n    parser.add_argument('--down_sampling_layers', type=int, default=0, help='num of down sampling layers')\n    parser.add_argument('--down_sampling_window', type=int, default=1, help='down sampling window size')\n    parser.add_argument('--down_sampling_method', type=str, default=None,\n                        help='down sampling method, only support avg, max, conv')\n    
parser.add_argument('--seg_len', type=int, default=48,\n                        help='the length of segmen-wise iteration of SegRNN')\n\n    # optimization\n    parser.add_argument('--num_workers', type=int, default=10, help='data loader num workers')\n    parser.add_argument('--itr', type=int, default=1, help='experiments times')\n    parser.add_argument('--train_epochs', type=int, default=10, help='train epochs')\n    parser.add_argument('--batch_size', type=int, default=8, help='batch size of train input data')\n    parser.add_argument('--patience', type=int, default=3, help='early stopping patience')\n    parser.add_argument('--learning_rate', type=float, default=0.0001, help='optimizer learning rate')\n    parser.add_argument('--des', type=str, default='test', help='exp description')\n    parser.add_argument('--loss', type=str, default='MSE', help='loss function')\n    parser.add_argument('--lradj', type=str, default='type1', help='adjust learning rate')\n    parser.add_argument('--use_amp', action='store_true', help='use automatic mixed precision training', default=False)\n\n    # GPU\n    parser.add_argument('--use_gpu', type=bool, default=True, help='use gpu')\n    parser.add_argument('--gpu', type=int, default=0, help='gpu')\n    parser.add_argument('--use_multi_gpu', help='use multiple gpus', default=False)  ## action='store_true',\n    parser.add_argument('--devices', type=str, default='0,1,2,3', help='device ids of multile gpus')\n\n    # de-stationary projector params\n    parser.add_argument('--p_hidden_dims', type=int, nargs='+', default=[128, 128],\n                        help='hidden layer dimensions of projector (List)')\n    parser.add_argument('--p_hidden_layers', type=int, default=2, help='number of hidden layers in projector')\n\n    # metrics (dtw)\n    parser.add_argument('--use_dtw', type=bool, default=False,\n                        help='the controller of using dtw metric (dtw is time consuming, not suggested unless necessary)')\n\n    # 
Augmentation\n    parser.add_argument('--augmentation_ratio', type=int, default=0, help=\"How many times to augment\")\n    parser.add_argument('--seed', type=int, default=2, help=\"Randomization seed\")\n    parser.add_argument('--jitter', default=False, action=\"store_true\", help=\"Jitter preset augmentation\")\n    parser.add_argument('--scaling', default=False, action=\"store_true\", help=\"Scaling preset augmentation\")\n    parser.add_argument('--permutation', default=False, action=\"store_true\",\n                        help=\"Equal Length Permutation preset augmentation\")\n    parser.add_argument('--randompermutation', default=False, action=\"store_true\",\n                        help=\"Random Length Permutation preset augmentation\")\n    parser.add_argument('--magwarp', default=False, action=\"store_true\", help=\"Magnitude warp preset augmentation\")\n    parser.add_argument('--timewarp', default=False, action=\"store_true\", help=\"Time warp preset augmentation\")\n    parser.add_argument('--windowslice', default=False, action=\"store_true\", help=\"Window slice preset augmentation\")\n    parser.add_argument('--windowwarp', default=False, action=\"store_true\", help=\"Window warp preset augmentation\")\n    parser.add_argument('--rotation', default=False, action=\"store_true\", help=\"Rotation preset augmentation\")\n    parser.add_argument('--spawner', default=False, action=\"store_true\", help=\"SPAWNER preset augmentation\")\n    parser.add_argument('--dtwwarp', default=False, action=\"store_true\", help=\"DTW warp preset augmentation\")\n    parser.add_argument('--shapedtwwarp', default=False, action=\"store_true\", help=\"Shape DTW warp preset augmentation\")\n    parser.add_argument('--wdba', default=False, action=\"store_true\", help=\"Weighted DBA preset augmentation\")\n    parser.add_argument('--discdtw', default=False, action=\"store_true\",\n                        help=\"Discrimitive DTW warp preset augmentation\")\n    
parser.add_argument('--discsdtw', default=False, action=\"store_true\",\n                        help=\"Discrimitive shapeDTW warp preset augmentation\")\n    parser.add_argument('--extra_tag', type=str, default=\"\", help=\"Anything extra\")\n\n    parser.add_argument('--save_dir', type=str, default='/SSD/lz/ts_forecasting_methods/result/')\n    parser.add_argument('--save_csv_name', type=str, default='itransformer_forecasting_0729.csv')\n\n    args = parser.parse_args()\n    # args.use_gpu = True if torch.cuda.is_available() and args.use_gpu else False\n    args.use_gpu = True if torch.cuda.is_available() else False\n\n    fix_seed = args.random_seed\n    random.seed(fix_seed)\n    torch.manual_seed(fix_seed)\n    np.random.seed(fix_seed)\n\n    # 检查路径是否存在，如果不存在则赋值为新的路径\n    if not os.path.exists(args.root_path):\n        args.root_path = '/dev_data/lz/ts_forecasting_methods/ts2vec/datasets'\n        args.save_dir = '/dev_data/lz/ts_forecasting_methods/result/'\n\n    print(\"root_path = \", args.root_path)  # 输出检查\n\n    print(torch.cuda.is_available())\n\n    if args.use_gpu and args.use_multi_gpu:\n        args.devices = args.devices.replace(' ', '')\n        device_ids = args.devices.split(',')\n        args.device_ids = [int(id_) for id_ in device_ids]\n        args.gpu = args.device_ids[0]\n\n    print('Args in experiment:')\n    print_args(args)\n\n    if args.task_name == 'long_term_forecast':\n        Exp = Exp_Long_Term_Forecast\n    elif args.task_name == 'short_term_forecast':\n        Exp = Exp_Short_Term_Forecast\n    else:\n        Exp = Exp_Long_Term_Forecast\n\n    if args.is_training:\n        for ii in range(args.itr):\n            # setting record of experiments\n            exp = Exp(args)  # set experiments\n            setting = '{}_{}_{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_expand{}_dc{}_fc{}_eb{}_dt{}_{}_{}'.format(\n                args.task_name,\n                args.model_id,\n                args.model,\n                
args.data,\n                args.features,\n                args.seq_len,\n                args.label_len,\n                args.pred_len,\n                args.d_model,\n                args.n_heads,\n                args.e_layers,\n                args.d_layers,\n                args.d_ff,\n                args.expand,\n                args.d_conv,\n                args.factor,\n                args.embed,\n                args.distil,\n                args.des, ii)\n\n            print('>>>>>>>start training : {}>>>>>>>>>>>>>>>>>>>>>>>>>>'.format(setting))\n            exp.train(setting)\n\n            print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))\n            mae, mse, rmse, mape, mspe = exp.test(setting)\n            # torch.cuda.empty_cache()\n            print(\"Success mae, mse = \", mae, mse)\n\n            end_result = {}\n            end_result['dataset'] = args.data_path\n            end_result['pred_len'] = args.pred_len\n            end_result['random_seed'] = args.random_seed\n            end_result['MSE'] = mse\n            end_result['MAE'] = mae\n\n            import pandas as pd\n\n            # 指定保存路径\n            save_path = args.save_dir + args.save_csv_name\n\n            # 转换字典为 DataFrame\n            df_new = pd.DataFrame([end_result])\n\n            # 检查文件是否存在\n            if os.path.exists(save_path):\n                # 文件存在，读取现有数据\n                df_existing = pd.read_csv(save_path, index_col=0)\n                # 将新数据附加到现有数据框中\n                df_combined = pd.concat([df_existing, df_new], ignore_index=True)\n            else:\n                # 文件不存在，创建新的数据框\n                df_combined = df_new\n\n            # 保存 DataFrame 为 CSV 文件\n            df_combined.to_csv(save_path, index=True, index_label=\"id\")\n\n            print(\"Save success!!!\")\n\n\n    else:\n        ii = 0\n        setting = '{}_{}_{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_expand{}_dc{}_fc{}_eb{}_dt{}_{}_{}'.format(\n     
       args.task_name,\n            args.model_id,\n            args.model,\n            args.data,\n            args.features,\n            args.seq_len,\n            args.label_len,\n            args.pred_len,\n            args.d_model,\n            args.n_heads,\n            args.e_layers,\n            args.d_layers,\n            args.d_ff,\n            args.expand,\n            args.d_conv,\n            args.factor,\n            args.embed,\n            args.distil,\n            args.des, ii)\n\n        exp = Exp(args)  # set experiments\n        print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))\n        exp.test(setting, test=1)\n        # torch.cuda.empty_cache()\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/train_logtrans.py",
    "content": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\nimport argparse\nimport os\nimport torch\nfrom Other_baselines.exp.exp_long_term_forecasting import Exp_Long_Term_Forecast\nfrom utils.print_args import print_args\nimport random\nimport numpy as np\n\ntorch.cuda.set_device(0)\n\n\nif __name__ == '__main__':\n\n    parser = argparse.ArgumentParser(description='DLinear')  ### DLinear iTransformer TimesNet Informer Autoformer\n\n    # basic config\n    parser.add_argument('--random_seed', type=int, default=42, help='random seed')\n    parser.add_argument('--task_name', type=str, required=False, default='long_term_forecast',\n                        help='task name, options:[long_term_forecast, short_term_forecast, imputation, classification, anomaly_detection]')\n    parser.add_argument('--is_training', type=int, required=False, default=1, help='status')\n    parser.add_argument('--model_id', type=str, required=False, default='test', help='model id')\n    parser.add_argument('--model', type=str, required=False, default='LogTrans',\n                        help='model name, options: [Autoformer, Transformer, TimesNet]')\n\n    # data loader\n    parser.add_argument('--dataset', type=str, required=False, default='weather', help='dataset type')\n    parser.add_argument('--data', type=str, required=False, default='custom', help='dataset type')\n    # parser.add_argument('--root_path', type=str, default='./data/ETT/', help='root path of the data file')\n    parser.add_argument('--root_path', type=str, default='/SSD/lz/ts_forecasting_methods/ts2vec/datasets',\n                        help='root path of the data file')\n    parser.add_argument('--data_path', type=str, default='weather.csv', help='data file')\n    parser.add_argument('--features', type=str, default='M',\n                        help='forecasting task, options:[M, S, MS]; M:multivariate predict 
multivariate, S:univariate predict univariate, MS:multivariate predict univariate')\n    parser.add_argument('--target', type=str, default='OT', help='target feature in S or MS task')\n    parser.add_argument('--freq', type=str, default='h',\n                        help='freq for time features encoding, options:[s:secondly, t:minutely, h:hourly, d:daily, b:business days, w:weekly, m:monthly], you can also use more detailed freq like 15min or 3h')\n    parser.add_argument('--checkpoints', type=str, default='./checkpoints/', help='location of model checkpoints')\n\n    # forecasting task\n    parser.add_argument('--seq_len', type=int, default=96, help='input sequence length')\n    parser.add_argument('--label_len', type=int, default=48, help='start token length')\n    parser.add_argument('--pred_len', type=int, default=96, help='prediction sequence length')\n    parser.add_argument('--seasonal_patterns', type=str, default='Monthly', help='subset for M4')\n    parser.add_argument('--inverse', action='store_true', help='inverse output data', default=False)\n\n    # inputation task\n    parser.add_argument('--mask_rate', type=float, default=0.25, help='mask ratio')\n\n    # anomaly detection task\n    parser.add_argument('--anomaly_ratio', type=float, default=0.25, help='prior anomaly ratio (%)')\n\n    # model define\n    parser.add_argument('--expand', type=int, default=2, help='expansion factor for Mamba')\n    parser.add_argument('--d_conv', type=int, default=4, help='conv kernel size for Mamba')\n    parser.add_argument('--top_k', type=int, default=5, help='for TimesBlock')\n    parser.add_argument('--num_kernels', type=int, default=6, help='for Inception')\n    parser.add_argument('--enc_in', type=int, default=21, help='encoder input size')\n    parser.add_argument('--dec_in', type=int, default=21, help='decoder input size')\n    parser.add_argument('--c_out', type=int, default=21, help='output size')\n    parser.add_argument('--d_model', type=int, default=512, 
help='dimension of model')\n    parser.add_argument('--n_heads', type=int, default=8, help='num of heads')\n    parser.add_argument('--e_layers', type=int, default=2, help='num of encoder layers')\n    parser.add_argument('--d_layers', type=int, default=1, help='num of decoder layers')\n    parser.add_argument('--d_ff', type=int, default=2048, help='dimension of fcn')\n    parser.add_argument('--moving_avg', type=int, default=25, help='window size of moving average')\n    parser.add_argument('--factor', type=int, default=1, help='attn factor')\n    parser.add_argument('--distil', action='store_false',\n                        help='whether to use distilling in encoder, using this argument means not using distilling',\n                        default=True)\n    parser.add_argument('--dropout', type=float, default=0.1, help='dropout')\n    parser.add_argument('--embed', type=str, default='timeF',\n                        help='time features encoding, options:[timeF, fixed, learned]')\n    parser.add_argument('--activation', type=str, default='gelu', help='activation')\n    parser.add_argument('--output_attention', action='store_true', help='whether to output attention in ecoder')\n    parser.add_argument('--channel_independence', type=int, default=1,\n                        help='0: channel dependence 1: channel independence for FreTS model')\n    parser.add_argument('--decomp_method', type=str, default='moving_avg',\n                        help='method of series decompsition, only support moving_avg or dft_decomp')\n    parser.add_argument('--use_norm', type=int, default=1, help='whether to use normalize; True 1 False 0')\n    parser.add_argument('--down_sampling_layers', type=int, default=0, help='num of down sampling layers')\n    parser.add_argument('--down_sampling_window', type=int, default=1, help='down sampling window size')\n    parser.add_argument('--down_sampling_method', type=str, default=None,\n                        help='down sampling method, only 
support avg, max, conv')\n    parser.add_argument('--seg_len', type=int, default=48,\n                        help='the length of segmen-wise iteration of SegRNN')\n\n    # optimization\n    parser.add_argument('--num_workers', type=int, default=2, help='data loader num workers')\n    parser.add_argument('--itr', type=int, default=1, help='experiments times')\n    parser.add_argument('--train_epochs', type=int, default=10, help='train epochs')\n    parser.add_argument('--batch_size', type=int, default=128, help='batch size of train input data')\n    parser.add_argument('--patience', type=int, default=3, help='early stopping patience')\n    parser.add_argument('--learning_rate', type=float, default=0.0001, help='optimizer learning rate')\n    parser.add_argument('--des', type=str, default='test', help='exp description')\n    parser.add_argument('--loss', type=str, default='MSE', help='loss function')\n    parser.add_argument('--lradj', type=str, default='type1', help='adjust learning rate')\n    parser.add_argument('--use_amp', action='store_true', help='use automatic mixed precision training', default=False)\n\n    # GPU\n    parser.add_argument('--use_gpu', type=bool, default=True, help='use gpu')\n    parser.add_argument('--gpu', type=int, default=0, help='gpu')\n    parser.add_argument('--use_multi_gpu', help='use multiple gpus', default=False)  ## action='store_true',\n    parser.add_argument('--devices', type=str, default='0,1', help='device ids of multile gpus')\n\n    # de-stationary projector params\n    parser.add_argument('--p_hidden_dims', type=int, nargs='+', default=[128, 128],\n                        help='hidden layer dimensions of projector (List)')\n    parser.add_argument('--p_hidden_layers', type=int, default=2, help='number of hidden layers in projector')\n\n    # metrics (dtw)\n    parser.add_argument('--use_dtw', type=bool, default=False,\n                        help='the controller of using dtw metric (dtw is time consuming, not suggested 
unless necessary)')\n\n    # Augmentation\n    parser.add_argument('--augmentation_ratio', type=int, default=0, help=\"How many times to augment\")\n    parser.add_argument('--seed', type=int, default=2, help=\"Randomization seed\")\n    parser.add_argument('--jitter', default=False, action=\"store_true\", help=\"Jitter preset augmentation\")\n    parser.add_argument('--scaling', default=False, action=\"store_true\", help=\"Scaling preset augmentation\")\n    parser.add_argument('--permutation', default=False, action=\"store_true\",\n                        help=\"Equal Length Permutation preset augmentation\")\n    parser.add_argument('--randompermutation', default=False, action=\"store_true\",\n                        help=\"Random Length Permutation preset augmentation\")\n    parser.add_argument('--magwarp', default=False, action=\"store_true\", help=\"Magnitude warp preset augmentation\")\n    parser.add_argument('--timewarp', default=False, action=\"store_true\", help=\"Time warp preset augmentation\")\n    parser.add_argument('--windowslice', default=False, action=\"store_true\", help=\"Window slice preset augmentation\")\n    parser.add_argument('--windowwarp', default=False, action=\"store_true\", help=\"Window warp preset augmentation\")\n    parser.add_argument('--rotation', default=False, action=\"store_true\", help=\"Rotation preset augmentation\")\n    parser.add_argument('--spawner', default=False, action=\"store_true\", help=\"SPAWNER preset augmentation\")\n    parser.add_argument('--dtwwarp', default=False, action=\"store_true\", help=\"DTW warp preset augmentation\")\n    parser.add_argument('--shapedtwwarp', default=False, action=\"store_true\", help=\"Shape DTW warp preset augmentation\")\n    parser.add_argument('--wdba', default=False, action=\"store_true\", help=\"Weighted DBA preset augmentation\")\n    parser.add_argument('--discdtw', default=False, action=\"store_true\",\n                        help=\"Discrimitive DTW warp preset 
augmentation\")\n    parser.add_argument('--discsdtw', default=False, action=\"store_true\",\n                        help=\"Discrimitive shapeDTW warp preset augmentation\")\n    parser.add_argument('--extra_tag', type=str, default=\"\", help=\"Anything extra\")\n\n    parser.add_argument('--save_dir', type=str, default='/SSD/lz/ts_forecasting_methods/result/')\n    parser.add_argument('--save_csv_name', type=str, default='logtrans_forecasting_0731.csv')\n\n    args = parser.parse_args()\n    # args.use_gpu = True if torch.cuda.is_available() and args.use_gpu else False\n    args.use_gpu = True if torch.cuda.is_available() else False\n\n    fix_seed = args.random_seed\n    random.seed(fix_seed)\n    torch.manual_seed(fix_seed)\n    np.random.seed(fix_seed)\n\n    # 检查路径是否存在，如果不存在则赋值为新的路径\n    if not os.path.exists(args.root_path):\n        args.root_path = '/dev_data/lz/ts_forecasting_methods/ts2vec/datasets'\n        args.save_dir = '/dev_data/lz/ts_forecasting_methods/result/'\n\n    print(\"root_path = \", args.root_path)  # 输出检查\n\n    print(torch.cuda.is_available())\n\n    if args.use_gpu and args.use_multi_gpu:\n        args.devices = args.devices.replace(' ', '')\n        device_ids = args.devices.split(',')\n        args.device_ids = [int(id_) for id_ in device_ids]\n        args.gpu = args.device_ids[0]\n\n    print('Args in experiment:')\n    print_args(args)\n\n    # parser.add_argument('--enc_in', type=int, default=21, help='encoder input size')\n    # parser.add_argument('--dec_in', type=int, default=21, help='decoder input size')\n    # parser.add_argument('--c_out', type=int, default=21, help='output size')\n    from Other_baselines.data_provider.data_factory import data_provider\n    train_data, train_loader = data_provider(args, 'train')\n\n    print(\"train_data.shape = \", train_data.data_x.shape)\n    args.enc_in = train_data.data_x.shape[-1]\n    args.dec_in = train_data.data_x.shape[-1]\n    args.c_out = train_data.data_x.shape[-1]\n\n    
Exp = Exp_Long_Term_Forecast\n\n\n\n    if args.is_training:\n        for ii in range(args.itr):\n            # setting record of experiments\n            exp = Exp(args)  # set experiments\n            setting = '{}_{}_{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_expand{}_dc{}_fc{}_eb{}_dt{}_{}_{}'.format(\n                args.task_name,\n                args.model_id,\n                args.model,\n                args.data,\n                args.features,\n                args.seq_len,\n                args.label_len,\n                args.pred_len,\n                args.d_model,\n                args.n_heads,\n                args.e_layers,\n                args.d_layers,\n                args.d_ff,\n                args.expand,\n                args.d_conv,\n                args.factor,\n                args.embed,\n                args.distil,\n                args.des, ii)\n\n            print('>>>>>>>start training : {}>>>>>>>>>>>>>>>>>>>>>>>>>>'.format(setting))\n            exp.train(setting)\n\n            print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))\n            mae, mse, rmse, mape, mspe = exp.test(setting)\n            # torch.cuda.empty_cache()\n            print(\"Success mae, mse = \", mae, mse)\n\n            end_result = {}\n            end_result['dataset'] = args.data_path\n            end_result['pred_len'] = args.pred_len\n            end_result['random_seed'] = args.random_seed\n            end_result['MSE'] = mse\n            end_result['MAE'] = mae\n\n            import pandas as pd\n\n            # 指定保存路径\n            save_path = args.save_dir + args.save_csv_name\n\n            # 转换字典为 DataFrame\n            df_new = pd.DataFrame([end_result])\n\n            # 检查文件是否存在\n            if os.path.exists(save_path):\n                # 文件存在，读取现有数据\n                df_existing = pd.read_csv(save_path, index_col=0)\n                # 将新数据附加到现有数据框中\n                df_combined = pd.concat([df_existing, 
df_new], ignore_index=True)\n            else:\n                # 文件不存在，创建新的数据框\n                df_combined = df_new\n\n            # 保存 DataFrame 为 CSV 文件\n            df_combined.to_csv(save_path, index=True, index_label=\"id\")\n\n            print(\"Save success!!!\")\n\n\n    else:\n        ii = 0\n        setting = '{}_{}_{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_expand{}_dc{}_fc{}_eb{}_dt{}_{}_{}'.format(\n            args.task_name,\n            args.model_id,\n            args.model,\n            args.data,\n            args.features,\n            args.seq_len,\n            args.label_len,\n            args.pred_len,\n            args.d_model,\n            args.n_heads,\n            args.e_layers,\n            args.d_layers,\n            args.d_ff,\n            args.expand,\n            args.d_conv,\n            args.factor,\n            args.embed,\n            args.distil,\n            args.des, ii)\n\n        exp = Exp(args)  # set experiments\n        print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))\n        exp.test(setting, test=1)\n        # torch.cuda.empty_cache()\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/train_patchtst.py",
    "content": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\nimport argparse\nimport os\nimport torch\nfrom Other_baselines.exp.exp_main import Exp_Main\nimport random\nimport numpy as np\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(description='Autoformer & Transformer family for Time Series Forecasting')\n\n    # random seed\n    parser.add_argument('--random_seed', type=int, default=42, help='random seed')\n\n    # basic config\n    # parser.add_argument('--is_training', type=int, required=True, default=1, help='status')\n    # parser.add_argument('--model_id', type=str, required=True, default='test', help='model id')\n    # parser.add_argument('--model', type=str, required=True, default='Autoformer',\n    #                     help='model name, options: [Autoformer, Informer, Transformer]')\n    parser.add_argument('--task_name', type=str, required=False, default='long_term_forecast',\n                        help='task name, options:[long_term_forecast, short_term_forecast, imputation, classification, anomaly_detection]')\n    parser.add_argument('--is_training', type=int, required=False, default=1, help='status')\n    parser.add_argument('--model_id', type=str, required=False, default='test', help='model id')\n    parser.add_argument('--model', type=str, required=False, default='PatchTST',\n                        help='model name, options: [Autoformer, Transformer, TimesNet]')\n\n    parser.add_argument('--seasonal_patterns', type=str, default='Monthly', help='subset for M4')\n    parser.add_argument('--augmentation_ratio', type=int, default=0, help=\"How many times to augment\")\n\n    # data loader\n    # parser.add_argument('--data', type=str, required=True, default='ETTm1', help='dataset type')\n    # parser.add_argument('--root_path', type=str, default='./data/ETT/', help='root path of the data file')\n    # 
parser.add_argument('--data_path', type=str, default='ETTh1.csv', help='data file')\n\n    parser.add_argument('--data', type=str, required=False, default='custom', help='dataset type')\n    # parser.add_argument('--root_path', type=str, default='./data/ETT/', help='root path of the data file')\n    parser.add_argument('--root_path', type=str, default='/SSD/lz/ts_forecasting_methods/ts2vec/datasets',\n                        help='root path of the data file')\n    parser.add_argument('--data_path', type=str, default='national_illness.csv', help='data file')\n\n    parser.add_argument('--features', type=str, default='M',\n                        help='forecasting task, options:[M, S, MS]; M:multivariate predict multivariate, S:univariate predict univariate, MS:multivariate predict univariate')\n    parser.add_argument('--target', type=str, default='OT', help='target feature in S or MS task')\n    parser.add_argument('--freq', type=str, default='h',\n                        help='freq for time features encoding, options:[s:secondly, t:minutely, h:hourly, d:daily, b:business days, w:weekly, m:monthly], you can also use more detailed freq like 15min or 3h')\n    parser.add_argument('--checkpoints', type=str, default='./checkpoints/', help='location of model checkpoints')\n\n    # forecasting task\n    parser.add_argument('--seq_len', type=int, default=60, help='input sequence length')\n    parser.add_argument('--label_len', type=int, default=60, help='start token length')\n    parser.add_argument('--pred_len', type=int, default=60, help='prediction sequence length')\n\n    # DLinear\n    # parser.add_argument('--individual', action='store_true', default=False, help='DLinear: a linear layer for each variate(channel) individually')\n\n    # PatchTST\n    parser.add_argument('--fc_dropout', type=float, default=0.3, help='fully connected dropout')\n    parser.add_argument('--head_dropout', type=float, default=0.0, help='head dropout')\n    
parser.add_argument('--patch_len', type=int, default=24, help='patch length')\n    parser.add_argument('--stride', type=int, default=8, help='stride')\n    parser.add_argument('--padding_patch', default='end', help='None: None; end: padding on the end')\n    parser.add_argument('--revin', type=int, default=1, help='RevIN; True 1 False 0')\n    parser.add_argument('--affine', type=int, default=0, help='RevIN-affine; True 1 False 0')\n    parser.add_argument('--subtract_last', type=int, default=0, help='0: subtract mean; 1: subtract last')\n    parser.add_argument('--decomposition', type=int, default=0, help='decomposition; True 1 False 0')\n    parser.add_argument('--kernel_size', type=int, default=25, help='decomposition-kernel')\n    parser.add_argument('--individual', type=int, default=0, help='individual head; True 1 False 0')\n\n    # Formers\n    parser.add_argument('--embed_type', type=int, default=0,\n                        help='0: default 1: value embedding + temporal embedding + positional embedding 2: value embedding + temporal embedding 3: value embedding + positional embedding 4: value embedding')\n    parser.add_argument('--enc_in', type=int, default=7,\n                        help='encoder input size')  # DLinear with --individual, use this hyperparameter as the number of channels\n    parser.add_argument('--dec_in', type=int, default=7, help='decoder input size')\n    parser.add_argument('--c_out', type=int, default=7, help='output size')\n    parser.add_argument('--d_model', type=int, default=512, help='dimension of model')\n    parser.add_argument('--n_heads', type=int, default=8, help='num of heads')\n    parser.add_argument('--e_layers', type=int, default=2, help='num of encoder layers')\n    parser.add_argument('--d_layers', type=int, default=1, help='num of decoder layers')\n    parser.add_argument('--d_ff', type=int, default=2048, help='dimension of fcn')\n    parser.add_argument('--moving_avg', type=int, default=25, help='window size of 
moving average')\n    parser.add_argument('--factor', type=int, default=1, help='attn factor')\n    parser.add_argument('--distil', action='store_false',\n                        help='whether to use distilling in encoder, using this argument means not using distilling',\n                        default=True)\n    parser.add_argument('--dropout', type=float, default=0.05, help='dropout')\n    parser.add_argument('--embed', type=str, default='timeF',\n                        help='time features encoding, options:[timeF, fixed, learned]')\n    parser.add_argument('--activation', type=str, default='gelu', help='activation')\n    parser.add_argument('--output_attention', action='store_true', help='whether to output attention in ecoder')\n    parser.add_argument('--do_predict', action='store_true', help='whether to predict unseen future data')\n\n    # optimization\n    parser.add_argument('--num_workers', type=int, default=10, help='data loader num workers')\n    parser.add_argument('--itr', type=int, default=1, help='experiments times')\n    parser.add_argument('--train_epochs', type=int, default=10, help='train epochs')  ## epoches\n    parser.add_argument('--batch_size', type=int, default=128, help='batch size of train input data')\n    parser.add_argument('--patience', type=int, default=100, help='early stopping patience')\n    parser.add_argument('--learning_rate', type=float, default=0.0001, help='optimizer learning rate')\n    parser.add_argument('--des', type=str, default='test', help='exp description')\n    parser.add_argument('--loss', type=str, default='mse', help='loss function')\n    parser.add_argument('--lradj', type=str, default='type3', help='adjust learning rate')\n    parser.add_argument('--pct_start', type=float, default=0.3, help='pct_start')\n    parser.add_argument('--use_amp', action='store_true', help='use automatic mixed precision training', default=False)\n\n    # GPU\n    parser.add_argument('--use_gpu', type=bool, default=True, help='use 
gpu')\n    parser.add_argument('--gpu', type=int, default=0, help='gpu')\n    parser.add_argument('--use_multi_gpu', action='store_true', help='use multiple gpus', default=False)\n    parser.add_argument('--devices', type=str, default='0,1,2,3', help='device ids of multile gpus')\n    parser.add_argument('--test_flop', action='store_true', default=False, help='See utils/tools for usage')\n\n    parser.add_argument('--save_dir', type=str, default='/SSD/lz/ts_forecasting_methods/result/')\n    parser.add_argument('--save_csv_name', type=str, default='patchtst_forecasting_0814.csv')\n\n    args = parser.parse_args()\n\n    # 检查路径是否存在，如果不存在则赋值为新的路径\n    if not os.path.exists(args.save_dir):\n        args.save_dir = '/SSD/lz/ts_forecasting_methods/result/'\n\n    print(\"save_dir = \", args.save_dir)  # 输出检查\n\n    # random seed\n    fix_seed = args.random_seed\n    random.seed(fix_seed)\n    torch.manual_seed(fix_seed)\n    np.random.seed(fix_seed)\n\n    args.use_gpu = True if torch.cuda.is_available() and args.use_gpu else False\n\n    if args.use_gpu and args.use_multi_gpu:\n        args.devices = args.devices.replace(' ', '')\n        device_ids = args.devices.split(',')\n        args.device_ids = [int(id_) for id_ in device_ids]\n        args.gpu = args.device_ids[0]\n\n    print('Args in experiment:')\n    print(args)\n\n    Exp = Exp_Main\n\n    if args.is_training:\n        for ii in range(args.itr):\n            # setting record of experiments\n            setting = '{}_{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_fc{}_eb{}_dt{}_{}_{}'.format(\n                args.model_id,\n                args.model,\n                args.data,\n                args.features,\n                args.seq_len,\n                args.label_len,\n                args.pred_len,\n                args.d_model,\n                args.n_heads,\n                args.e_layers,\n                args.d_layers,\n                args.d_ff,\n                args.factor,\n                
args.embed,\n                args.distil,\n                args.des, ii)\n\n            exp = Exp(args)  # set experiments\n            print('>>>>>>>start training : {}>>>>>>>>>>>>>>>>>>>>>>>>>>'.format(setting))\n            exp.train(setting)\n\n            print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))\n            mae, mse, rmse, mape, mspe = exp.test(setting)\n\n            end_result = {}\n            end_result['dataset'] = args.data_path\n            end_result['pred_len'] = args.pred_len\n            end_result['random_seed'] = args.random_seed\n            end_result['MSE'] = mse\n            end_result['MAE'] = mae\n\n            import pandas as pd\n\n            # 指定保存路径\n            save_path = args.save_dir + args.save_csv_name\n\n            # 转换字典为 DataFrame\n            df_new = pd.DataFrame([end_result])\n\n            # 检查文件是否存在\n            if os.path.exists(save_path):\n                # 文件存在，读取现有数据\n                df_existing = pd.read_csv(save_path, index_col=0)\n                # 将新数据附加到现有数据框中\n                df_combined = pd.concat([df_existing, df_new], ignore_index=True)\n            else:\n                # 文件不存在，创建新的数据框\n                df_combined = df_new\n\n            # 保存 DataFrame 为 CSV 文件\n            df_combined.to_csv(save_path, index=True, index_label=\"id\")\n\n            print(\"Save success!!!\")\n\n\n\n            if args.do_predict:\n                print('>>>>>>>predicting : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))\n                exp.predict(setting, True)\n\n            torch.cuda.empty_cache()\n    else:\n        ii = 0\n        setting = '{}_{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_fc{}_eb{}_dt{}_{}_{}'.format(args.model_id,\n                                                                                                      args.model,\n                                                                                                      args.data,\n        
                                                                                              args.features,\n                                                                                                      args.seq_len,\n                                                                                                      args.label_len,\n                                                                                                      args.pred_len,\n                                                                                                      args.d_model,\n                                                                                                      args.n_heads,\n                                                                                                      args.e_layers,\n                                                                                                      args.d_layers,\n                                                                                                      args.d_ff,\n                                                                                                      args.factor,\n                                                                                                      args.embed,\n                                                                                                      args.distil,\n                                                                                                      args.des, ii)\n\n        exp = Exp(args)  # set experiments\n        print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))\n        exp.test(setting, test=1)\n        torch.cuda.empty_cache()\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/train_tcn.py",
    "content": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\nimport argparse\nimport os\nimport torch\nfrom Other_baselines.exp.exp_long_term_forecasting import Exp_Long_Term_Forecast\nfrom Other_baselines.exp.exp_short_term_forecasting import Exp_Short_Term_Forecast\nfrom utils.print_args import print_args\nimport random\nfrom Other_baselines.data_provider.data_factory import data_provider\nimport numpy as np\n\nif __name__ == '__main__':\n\n    parser = argparse.ArgumentParser(description='DLinear')  ### DLinear iTransformer TimesNet Informer Autoformer\n\n    # basic config\n    parser.add_argument('--random_seed', type=int, default=42, help='random seed')\n    parser.add_argument('--task_name', type=str, required=False, default='long_term_forecast',\n                        help='task name, options:[long_term_forecast, short_term_forecast, imputation, classification, anomaly_detection]')\n    parser.add_argument('--is_training', type=int, required=False, default=1, help='status')\n    parser.add_argument('--model_id', type=str, required=False, default='test', help='model id')\n    parser.add_argument('--model', type=str, required=False, default='TCN',\n                        help='model name, options: [Autoformer, Transformer, TimesNet]')\n\n    parser.add_argument('--dropout', type=float, default=0.1,\n                        help='dropout applied to layers (default: 0.1)')\n    parser.add_argument('--emb_dropout', type=float, default=0.1,\n                        help='dropout applied to the embedded layer (0 = no dropout) (default: 0.1)')\n    parser.add_argument('--kernel_size', type=int, default=3,\n                        help='kernel size (default: 3)')\n    parser.add_argument('--input_size', type=int, default=100,\n                        help='dimension of character embeddings (default: 100)')\n    # parser.add_argument('--num_channels', type=int, 
default=14,\n    #                     help='')\n    parser.add_argument('--nhid', type=int, default=450,\n                        help='number of hidden units per layer (default: 450)')\n    parser.add_argument('--levels', type=int, default=3,\n                        help='# of levels (default: 3)')\n\n    # data loader\n    parser.add_argument('--dataset', type=str, required=False, default='national_illness', help='dataset type')\n    parser.add_argument('--data', type=str, required=False, default='custom', help='dataset type')\n    # parser.add_argument('--root_path', type=str, default='./data/ETT/', help='root path of the data file')\n    parser.add_argument('--root_path', type=str, default='/SSD/lz/ts_forecasting_methods/ts2vec/datasets',\n                        help='root path of the data file')\n    parser.add_argument('--data_path', type=str, default='national_illness.csv', help='data file')\n    parser.add_argument('--features', type=str, default='M',\n                        help='forecasting task, options:[M, S, MS]; M:multivariate predict multivariate, S:univariate predict univariate, MS:multivariate predict univariate')\n    parser.add_argument('--target', type=str, default='OT', help='target feature in S or MS task')\n    parser.add_argument('--freq', type=str, default='h',\n                        help='freq for time features encoding, options:[s:secondly, t:minutely, h:hourly, d:daily, b:business days, w:weekly, m:monthly], you can also use more detailed freq like 15min or 3h')\n    parser.add_argument('--checkpoints', type=str, default='./checkpoints/', help='location of model checkpoints')\n\n    # forecasting task\n    parser.add_argument('--seq_len', type=int, default=36, help='input sequence length')\n    parser.add_argument('--label_len', type=int, default=36, help='start token length')\n    parser.add_argument('--pred_len', type=int, default=36, help='prediction sequence length')\n    parser.add_argument('--seasonal_patterns', type=str, 
default='Monthly', help='subset for M4')\n    parser.add_argument('--inverse', action='store_true', help='inverse output data', default=False)\n\n    # inputation task\n    parser.add_argument('--mask_rate', type=float, default=0.25, help='mask ratio')\n\n    # anomaly detection task\n    parser.add_argument('--anomaly_ratio', type=float, default=0.25, help='prior anomaly ratio (%)')\n\n    # model define\n    parser.add_argument('--expand', type=int, default=2, help='expansion factor for Mamba')\n    parser.add_argument('--d_conv', type=int, default=4, help='conv kernel size for Mamba')\n    parser.add_argument('--top_k', type=int, default=5, help='for TimesBlock')\n    parser.add_argument('--num_kernels', type=int, default=6, help='for Inception')\n    parser.add_argument('--enc_in', type=int, default=7, help='encoder input size')\n    parser.add_argument('--dec_in', type=int, default=7, help='decoder input size')\n    parser.add_argument('--c_out', type=int, default=7, help='output size')\n    parser.add_argument('--d_model', type=int, default=512, help='dimension of model')\n    parser.add_argument('--n_heads', type=int, default=8, help='num of heads')\n    parser.add_argument('--e_layers', type=int, default=2, help='num of encoder layers')\n    parser.add_argument('--d_layers', type=int, default=1, help='num of decoder layers')\n    parser.add_argument('--d_ff', type=int, default=2048, help='dimension of fcn')\n    parser.add_argument('--moving_avg', type=int, default=25, help='window size of moving average')\n    parser.add_argument('--factor', type=int, default=1, help='attn factor')\n    parser.add_argument('--distil', action='store_false',\n                        help='whether to use distilling in encoder, using this argument means not using distilling',\n                        default=True)\n    # parser.add_argument('--dropout', type=float, default=0.1, help='dropout')\n    parser.add_argument('--embed', type=str, default='timeF',\n                     
   help='time features encoding, options:[timeF, fixed, learned]')\n    parser.add_argument('--activation', type=str, default='gelu', help='activation')\n    parser.add_argument('--output_attention', action='store_true', help='whether to output attention in ecoder')\n    parser.add_argument('--channel_independence', type=int, default=1,\n                        help='0: channel dependence 1: channel independence for FreTS model')\n    parser.add_argument('--decomp_method', type=str, default='moving_avg',\n                        help='method of series decompsition, only support moving_avg or dft_decomp')\n    parser.add_argument('--use_norm', type=int, default=1, help='whether to use normalize; True 1 False 0')\n    parser.add_argument('--down_sampling_layers', type=int, default=0, help='num of down sampling layers')\n    parser.add_argument('--down_sampling_window', type=int, default=1, help='down sampling window size')\n    parser.add_argument('--down_sampling_method', type=str, default=None,\n                        help='down sampling method, only support avg, max, conv')\n    parser.add_argument('--seg_len', type=int, default=48,\n                        help='the length of segmen-wise iteration of SegRNN')\n\n    # optimization\n    parser.add_argument('--num_workers', type=int, default=10, help='data loader num workers')\n    parser.add_argument('--itr', type=int, default=1, help='experiments times')\n    parser.add_argument('--train_epochs', type=int, default=10, help='train epochs')\n    parser.add_argument('--batch_size', type=int, default=128, help='batch size of train input data')\n    parser.add_argument('--patience', type=int, default=3, help='early stopping patience')\n    parser.add_argument('--learning_rate', type=float, default=0.0001, help='optimizer learning rate')\n    parser.add_argument('--des', type=str, default='test', help='exp description')\n    parser.add_argument('--loss', type=str, default='MSE', help='loss function')\n    
parser.add_argument('--lradj', type=str, default='type1', help='adjust learning rate')\n    parser.add_argument('--use_amp', action='store_true', help='use automatic mixed precision training', default=False)\n\n    # GPU\n    parser.add_argument('--use_gpu', type=bool, default=True, help='use gpu')\n    parser.add_argument('--gpu', type=int, default=0, help='gpu')\n    parser.add_argument('--use_multi_gpu', help='use multiple gpus', default=False)  ## action='store_true',\n    parser.add_argument('--devices', type=str, default='0,1,2,3', help='device ids of multile gpus')\n\n    # de-stationary projector params\n    parser.add_argument('--p_hidden_dims', type=int, nargs='+', default=[128, 128],\n                        help='hidden layer dimensions of projector (List)')\n    parser.add_argument('--p_hidden_layers', type=int, default=2, help='number of hidden layers in projector')\n\n    # metrics (dtw)\n    parser.add_argument('--use_dtw', type=bool, default=False,\n                        help='the controller of using dtw metric (dtw is time consuming, not suggested unless necessary)')\n\n    # Augmentation\n    parser.add_argument('--augmentation_ratio', type=int, default=0, help=\"How many times to augment\")\n    parser.add_argument('--seed', type=int, default=2, help=\"Randomization seed\")\n    parser.add_argument('--jitter', default=False, action=\"store_true\", help=\"Jitter preset augmentation\")\n    parser.add_argument('--scaling', default=False, action=\"store_true\", help=\"Scaling preset augmentation\")\n    parser.add_argument('--permutation', default=False, action=\"store_true\",\n                        help=\"Equal Length Permutation preset augmentation\")\n    parser.add_argument('--randompermutation', default=False, action=\"store_true\",\n                        help=\"Random Length Permutation preset augmentation\")\n    parser.add_argument('--magwarp', default=False, action=\"store_true\", help=\"Magnitude warp preset augmentation\")\n    
parser.add_argument('--timewarp', default=False, action=\"store_true\", help=\"Time warp preset augmentation\")\n    parser.add_argument('--windowslice', default=False, action=\"store_true\", help=\"Window slice preset augmentation\")\n    parser.add_argument('--windowwarp', default=False, action=\"store_true\", help=\"Window warp preset augmentation\")\n    parser.add_argument('--rotation', default=False, action=\"store_true\", help=\"Rotation preset augmentation\")\n    parser.add_argument('--spawner', default=False, action=\"store_true\", help=\"SPAWNER preset augmentation\")\n    parser.add_argument('--dtwwarp', default=False, action=\"store_true\", help=\"DTW warp preset augmentation\")\n    parser.add_argument('--shapedtwwarp', default=False, action=\"store_true\", help=\"Shape DTW warp preset augmentation\")\n    parser.add_argument('--wdba', default=False, action=\"store_true\", help=\"Weighted DBA preset augmentation\")\n    parser.add_argument('--discdtw', default=False, action=\"store_true\",\n                        help=\"Discrimitive DTW warp preset augmentation\")\n    parser.add_argument('--discsdtw', default=False, action=\"store_true\",\n                        help=\"Discrimitive shapeDTW warp preset augmentation\")\n    parser.add_argument('--extra_tag', type=str, default=\"\", help=\"Anything extra\")\n\n    parser.add_argument('--save_dir', type=str, default='/SSD/lz/ts_forecasting_methods/result/')\n    parser.add_argument('--save_csv_name', type=str, default='tcn_forecasting_0812.csv')\n\n    # parser.add_argument('--use_gpu', type=bool, default=True, help='use gpu')\n    # parser.add_argument('--gpu', type=int, default=0, help='gpu')\n    # parser.add_argument('--use_multi_gpu', help='use multiple gpus', default=False)  ## action='store_true',\n    # parser.add_argument('--devices', type=str, default='0,1,2,3', help='device ids of multile gpus')\n\n    args = parser.parse_args()\n    # args.use_gpu = True if torch.cuda.is_available() and 
args.use_gpu else False\n    args.use_gpu = True if torch.cuda.is_available() else False\n\n    fix_seed = args.random_seed\n    random.seed(fix_seed)\n    torch.manual_seed(fix_seed)\n    np.random.seed(fix_seed)\n\n    # args.num_channels = [args.nhid] * (args.levels - 1) + [args.input_size]\n\n    train_data, train_loader = data_provider(args, 'train')\n\n    # args.num_channels = train_data.data_x.shape[-1]\n    args.seq_len = args.pred_len\n    args.input_size = args.pred_len\n    # args.input_size = 100\n    # args.input_size =  train_data.data_x.shape[-1]\n\n    # args.num_channels = [args.nhid] * (args.levels - 1) + [args.input_size]\n\n    args.num_channels = [train_data.data_x.shape[-1],args.input_size * 2,args.input_size]\n\n    print(\" args.num_channels = \",  args.num_channels)\n\n    # 检查路径是否存在，如果不存在则赋值为新的路径\n    if not os.path.exists(args.root_path):\n        args.root_path = '/dev_data/lz/ts_forecasting_methods/ts2vec/datasets'\n        args.save_dir = '/dev_data/lz/ts_forecasting_methods/result/'\n\n    print(\"root_path = \", args.root_path)  # 输出检查\n\n    print(torch.cuda.is_available())\n\n    if args.use_gpu and args.use_multi_gpu:\n        args.devices = args.devices.replace(' ', '')\n        device_ids = args.devices.split(',')\n        args.device_ids = [int(id_) for id_ in device_ids]\n        args.gpu = args.device_ids[0]\n\n    print('Args in experiment:')\n    print_args(args)\n    Exp = Exp_Long_Term_Forecast\n    # if args.task_name == 'long_term_forecast':\n    #     Exp = Exp_Long_Term_Forecast\n    # elif args.task_name == 'short_term_forecast':\n    #     Exp = Exp_Short_Term_Forecast\n    # else:\n    #     Exp = Exp_Long_Term_Forecast\n\n    if args.is_training:\n        for ii in range(args.itr):\n            # setting record of experiments\n            exp = Exp(args)  # set experiments\n            setting = '{}_{}_{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_expand{}_dc{}_fc{}_eb{}_dt{}_{}_{}'.format(\n                
args.task_name,\n                args.model_id,\n                args.model,\n                args.data,\n                args.features,\n                args.seq_len,\n                args.label_len,\n                args.pred_len,\n                args.d_model,\n                args.n_heads,\n                args.e_layers,\n                args.d_layers,\n                args.d_ff,\n                args.expand,\n                args.d_conv,\n                args.factor,\n                args.embed,\n                args.distil,\n                args.des, ii)\n\n            print('>>>>>>>start training : {}>>>>>>>>>>>>>>>>>>>>>>>>>>'.format(setting))\n            exp.train(setting)\n\n            print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))\n            mae, mse, rmse, mape, mspe = exp.test(setting)\n            # torch.cuda.empty_cache()\n            print(\"Success mae, mse = \", mae, mse)\n\n            end_result = {}\n            end_result['dataset'] = args.data_path\n            end_result['pred_len'] = args.pred_len\n            end_result['random_seed'] = args.random_seed\n            end_result['MSE'] = mse\n            end_result['MAE'] = mae\n\n            import pandas as pd\n\n            # 指定保存路径\n            save_path = args.save_dir + args.save_csv_name\n\n            # 转换字典为 DataFrame\n            df_new = pd.DataFrame([end_result])\n\n            # 检查文件是否存在\n            if os.path.exists(save_path):\n                # 文件存在，读取现有数据\n                df_existing = pd.read_csv(save_path, index_col=0)\n                # 将新数据附加到现有数据框中\n                df_combined = pd.concat([df_existing, df_new], ignore_index=True)\n            else:\n                # 文件不存在，创建新的数据框\n                df_combined = df_new\n\n            # 保存 DataFrame 为 CSV 文件\n            df_combined.to_csv(save_path, index=True, index_label=\"id\")\n\n            print(\"Save success!!!\")\n\n\n    else:\n        ii = 0\n        setting = 
'{}_{}_{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_expand{}_dc{}_fc{}_eb{}_dt{}_{}_{}'.format(\n            args.task_name,\n            args.model_id,\n            args.model,\n            args.data,\n            args.features,\n            args.seq_len,\n            args.label_len,\n            args.pred_len,\n            args.d_model,\n            args.n_heads,\n            args.e_layers,\n            args.d_layers,\n            args.d_ff,\n            args.expand,\n            args.d_conv,\n            args.factor,\n            args.embed,\n            args.distil,\n            args.des, ii)\n\n        exp = Exp(args)  # set experiments\n        print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))\n        exp.test(setting, test=1)\n        # torch.cuda.empty_cache()\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/train_tempo.py",
    "content": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\nfrom Other_baselines.data_provider.data_factory_tempo import data_provider\nfrom Other_baselines.utils.tools_tempo import EarlyStopping, adjust_learning_rate, vali, test\nfrom torch.utils.data import Subset\nfrom tqdm import tqdm\nfrom Other_baselines.models.PatchTST import PatchTST\nfrom Other_baselines.models.GPT4TS import GPT4TS\nfrom Other_baselines.models.TEMPO import TEMPO\n\nimport torch\nimport torch.nn as nn\nfrom numpy.random import choice\n\nimport os\nimport time\n\nimport warnings\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport argparse\nimport random\nimport sys\n\nfrom omegaconf import OmegaConf\n\n\ndef get_init_config(config_path=None):\n    config = OmegaConf.load(config_path)\n    return config\n\n\nwarnings.filterwarnings('ignore')\n\n\n\nparser = argparse.ArgumentParser(description='GPT4TS')\nparser.add_argument('--random_seed', type=int, default=42, help='random seed')\nparser.add_argument('--model_id', type=str, default='weather_GTP4TS_multi-debug')\nparser.add_argument('--checkpoints', type=str, default='/SSD/lz/ts_forecasting_methods/Other_baselines/checkpoints_multi_dataset/')\nparser.add_argument('--task_name', type=str, default='long_term_forecast')\n\nparser.add_argument('--stl_weight', type=float, default=0.01)\nparser.add_argument('--config_path', type=str, default='/SSD/lz/ts_forecasting_methods/Other_baselines/data_config.yml')\nparser.add_argument('--datasets', type=str, default='exchange')\nparser.add_argument('--target_data', type=str, default='exchange')\n# python train_tempo.py --datasets exchange --target_data exchange --data custom --data_path exchange_rate.csv --random_seed 42;\n# data loader\nparser.add_argument('--data', type=str, required=False, default='custom', help='dataset type')\n# parser.add_argument('--root_path', type=str, 
default='./data/ETT/', help='root path of the data file')\nparser.add_argument('--root_path', type=str, default='/SSD/lz/ts_forecasting_methods/ts2vec/datasets',\n                    help='root path of the data file')\nparser.add_argument('--data_path', type=str, default='exchange_rate.csv', help='data file')\nparser.add_argument('--seasonal_patterns', type=str, default='Monthly', help='subset for M4')\nparser.add_argument('--augmentation_ratio', type=int, default=0, help=\"How many times to augment\")\n\n\nparser.add_argument('--prompt', type=int, default=0)\nparser.add_argument('--num_nodes', type=int, default=1)\n\nparser.add_argument('--seq_len', type=int, default=512)\nparser.add_argument('--pred_len', type=int, default=96)\nparser.add_argument('--label_len', type=int, default=48)\n\nparser.add_argument('--decay_fac', type=float, default=0.9)\nparser.add_argument('--learning_rate', type=float, default=0.001)\nparser.add_argument('--batch_size', type=int, default=32)\nparser.add_argument('--num_workers', type=int, default=0)\nparser.add_argument('--train_epochs', type=int, default=10)\nparser.add_argument('--lradj', type=str, default='type3')  # for what\nparser.add_argument('--patience', type=int, default=5)\n\nparser.add_argument('--gpt_layers', type=int, default=6)\nparser.add_argument('--is_gpt', type=int, default=1)\nparser.add_argument('--e_layers', type=int, default=3)\nparser.add_argument('--d_model', type=int, default=768)\nparser.add_argument('--n_heads', type=int, default=4)\nparser.add_argument('--d_ff', type=int, default=768)\nparser.add_argument('--dropout', type=float, default=0.3)\nparser.add_argument('--enc_in', type=int, default=7)\nparser.add_argument('--c_out', type=int, default=7)\nparser.add_argument('--patch_size', type=int, default=16)\nparser.add_argument('--kernel_size', type=int, default=25)\n\nparser.add_argument('--loss_func', type=str, default='mse')\nparser.add_argument('--pretrain', type=int, 
default=1)\nparser.add_argument('--freeze', type=int, default=1)\nparser.add_argument('--model', type=str, default='TEMPO')  ### GPT4TS_multi TEMPO\nparser.add_argument('--stride', type=int, default=8)\nparser.add_argument('--max_len', type=int, default=-1)\nparser.add_argument('--hid_dim', type=int, default=16)\nparser.add_argument('--tmax', type=int, default=10)\n\nparser.add_argument('--itr', type=int, default=1)\nparser.add_argument('--gpu', type=int, default=1)\nparser.add_argument('--cos', type=int, default=0)\nparser.add_argument('--equal', type=int, default=1, help='1: equal sampling, 0: dont do the equal sampling')\nparser.add_argument('--pool', action='store_true', help='whether use prompt pool')\nparser.add_argument('--no_stl_loss', action='store_true', help='whether use prompt pool')\n\n\nparser.add_argument('--use_token', type=int, default=0)\nparser.add_argument('--electri_multiplier', type=int, default=1)\nparser.add_argument('--traffic_multiplier', type=int, default=1)\nparser.add_argument('--embed', type=str, default='timeF')\n\nparser.add_argument('--save_dir', type=str, default='/SSD/lz/ts_forecasting_methods/result/')\nparser.add_argument('--save_csv_name', type=str, default='tempo_forecasting_0729.csv')\n\n# args = parser.parse_args([])\nargs = parser.parse_args()\n\nif not os.path.exists(args.save_dir):\n    args.root_path = '/dev_data/lz/ts_forecasting_methods/ts2vec/datasets'\n    args.save_dir = '/dev_data/lz/ts_forecasting_methods/result/'\n    args.config_path = '/dev_data/lz/ts_forecasting_methods/Other_baselines/data_config.yml'\n    args.checkpoints = '/dev_data/lz/ts_forecasting_methods/Other_baselines/checkpoints_multi_dataset/'\n\n\nconfig = get_init_config(args.config_path)\n\n# fix_seed = 2021\nrandom.seed(args.random_seed)\ntorch.manual_seed(args.random_seed)\nnp.random.seed(args.random_seed)\n\nargs.itr = 1\n\nprint(args)\n\nSEASONALITY_MAP = {\n    \"minutely\": 1440,\n    \"10_minutes\": 144,\n    \"half_hourly\": 48,\n    
\"hourly\": 24,\n    \"daily\": 7,\n    \"weekly\": 1,\n    \"monthly\": 12,\n    \"quarterly\": 4,\n    \"yearly\": 1\n}\n\nmses = []\nmaes = []\nfor ii in range(args.itr):\n\n    setting = '{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_gl{}_df{}_eb{}_itr{}'.format(args.model_id, 336, args.label_len,\n                                                                             args.pred_len,\n                                                                             args.d_model, args.n_heads, args.e_layers,\n                                                                             args.gpt_layers,\n                                                                             args.d_ff, args.embed, ii)\n    path = os.path.join(args.checkpoints, setting)\n    print(\"path = \", path)\n    if not os.path.exists(path):\n        os.makedirs(path)\n\n    # if args.freq == 0:\n    #     args.freq = 'h'\n\n    device = torch.device('cuda:0')\n    if args.gpu == 1:\n        device = torch.device('cuda:1')\n\n    train_data_name = args.datasets.split(',')\n    print(train_data_name)\n    train_datas = []\n    val_datas = []\n    min_sample_num = sys.maxsize\n    for dataset_singe in args.datasets.split(','):\n        print(dataset_singe)\n        # args.data = config['datasets'][dataset_singe].data\n        # args.root_path = config['datasets'][dataset_singe].root_path\n        # args.data_path = config['datasets'][dataset_singe].data_path\n        args.data_name = config['datasets'][dataset_singe].data_name\n        args.features = config['datasets'][dataset_singe].features\n        args.freq = config['datasets'][dataset_singe].freq\n        args.target = config['datasets'][dataset_singe].target\n        args.embed = config['datasets'][dataset_singe].embed\n        args.percent = config['datasets'][dataset_singe].percent\n        args.lradj = config['datasets'][dataset_singe].lradj\n\n        print(\"args.data_name = \", args.data_name)\n        print(\"args.features = \",  
args.features)\n        print(\"args.freq = \", args.freq)\n        print(\"args.target = \", args.target)\n        print(\"args.embed = \", args.embed)\n\n        if args.freq == 0:\n            args.freq = 'h'\n\n        print(\"dataset: \", args.data)\n        train_data, train_loader = data_provider(args, 'train')\n        if dataset_singe not in ['ETTh1', 'ETTh2', 'ILI', 'exchange']:\n            min_sample_num = min(min_sample_num, len(train_data))\n\n        # args.percent = 20\n        vali_data, vali_loader = data_provider(args, 'val')\n        # args.percent = 100\n\n        # train_datas.append(train_data)\n        val_datas.append(vali_data)\n\n    for dataset_singe in args.datasets.split(','):\n        print(dataset_singe)\n        # args.data = config['datasets'][dataset_singe].data\n        # args.root_path = config['datasets'][dataset_singe].root_path\n        # args.data_path = config['datasets'][dataset_singe].data_path\n        args.data_name = config['datasets'][dataset_singe].data_name\n        args.features = config['datasets'][dataset_singe].features\n        args.freq = config['datasets'][dataset_singe].freq\n        args.target = config['datasets'][dataset_singe].target\n        args.embed = config['datasets'][dataset_singe].embed\n        args.percent = config['datasets'][dataset_singe].percent\n        args.lradj = config['datasets'][dataset_singe].lradj\n        if args.freq == 0:\n            args.freq = 'h'\n        # if args.freq != 'h':\n        #     args.freq = SEASONALITY_MAP[test_data.freq]\n        #     print(\"freq = {}\".format(args.freq))\n\n        print(\"dataset: \", args.data)\n        train_data, train_loader = data_provider(args, 'train')\n        if dataset_singe not in ['ETTh1', 'ETTh2', 'ILI', 'exchange'] and args.equal == 1:\n            train_data = Subset(train_data, choice(len(train_data), min_sample_num))\n        if args.electri_multiplier > 1 and args.equal == 1 and dataset_singe in ['electricity']:\n         
   train_data = Subset(train_data, choice(len(train_data), int(min_sample_num * args.electri_multiplier)))\n        if args.traffic_multiplier > 1 and args.equal == 1 and dataset_singe in ['traffic']:\n            train_data = Subset(train_data, choice(len(train_data), int(min_sample_num * args.traffic_multiplier)))\n        train_datas.append(train_data)\n\n    if len(train_datas) > 1:\n        train_data = torch.utils.data.ConcatDataset([train_datas[0], train_datas[1]])\n        vali_data = torch.utils.data.ConcatDataset([val_datas[0], val_datas[1]])\n        for i in range(2, len(train_datas)):\n            train_data = torch.utils.data.ConcatDataset([train_data, train_datas[i]])\n\n            vali_data = torch.utils.data.ConcatDataset([vali_data, val_datas[i]])\n\n        # import pdb; pdb.set_trace()\n        print(\"Way1\", len(train_data))\n\n        train_loader = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, shuffle=True,\n                                                   num_workers=args.num_workers)\n        vali_loader = torch.utils.data.DataLoader(vali_data, batch_size=args.batch_size, shuffle=False,\n                                                  num_workers=args.num_workers)\n\n        # args.data = config['datasets'][args.target_data].data\n        # args.root_path = config['datasets'][args.target_data].root_path\n        # args.data_path = config['datasets'][args.target_data].data_path\n        args.data_name = config['datasets'][args.target_data].data_name\n        args.features = config['datasets'][dataset_singe].features\n        args.freq = config['datasets'][args.target_data].freq\n        args.target = config['datasets'][args.target_data].target\n        args.embed = config['datasets'][args.target_data].embed\n        args.percent = config['datasets'][args.target_data].percent\n        args.lradj = config['datasets'][args.target_data].lradj\n        if args.freq == 0:\n            args.freq = 'h'\n        test_data, 
test_loader = data_provider(args, 'test')\n\n    time_now = time.time()\n    train_steps = len(train_loader)  # 190470 -52696\n\n    if args.model == 'PatchTST':\n        model = PatchTST(args, device)\n        model.to(device)\n    elif args.model == 'TEMPO':\n        model = TEMPO(args, device)\n        model.to(device)\n    else:\n        model = GPT4TS(args, device)\n    # mse, mae = test(model, test_data, test_loader, args, device, ii)\n\n    params = model.parameters()\n    model_optim = torch.optim.Adam(params, lr=args.learning_rate)\n\n    early_stopping = EarlyStopping(patience=args.patience, verbose=True)\n    if args.loss_func == 'mse':\n        criterion = nn.MSELoss()\n    elif args.loss_func == 'smape':\n        class SMAPE(nn.Module):\n            def __init__(self):\n                super(SMAPE, self).__init__()\n\n            def forward(self, pred, true):\n                return torch.mean(200 * torch.abs(pred - true) / (torch.abs(pred) + torch.abs(true) + 1e-8))\n\n\n        criterion = SMAPE()\n\n    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(model_optim, T_max=args.tmax, eta_min=1e-8)\n\n    for epoch in range(args.train_epochs):\n\n        iter_count = 0\n        train_loss = []\n        epoch_time = time.time()\n        print(\"len(train_loader) = \", len(train_loader))\n        for i, (batch_x, batch_y, batch_x_mark, batch_y_mark, seq_trend, seq_seasonal, seq_resid) in tqdm(\n                enumerate(train_loader), total=len(train_loader)):\n\n            iter_count += 1\n            model_optim.zero_grad()\n            batch_x = batch_x.float().to(device)\n\n            batch_y = batch_y.float().to(device)\n            batch_x_mark = batch_x_mark.float().to(device)\n            batch_y_mark = batch_y_mark.float().to(device)\n\n            seq_trend = seq_trend.float().to(device)\n            seq_seasonal = seq_seasonal.float().to(device)\n            seq_resid = seq_resid.float().to(device)\n\n            # 
print(seq_seasonal.shape)\n            if args.model == 'TEMPO' or 'multi' in args.model:\n                outputs, loss_local = model(batch_x, ii, seq_trend, seq_seasonal,\n                                            seq_resid)  # + model(seq_seasonal, ii) + model(seq_resid, ii)\n            elif 'former' in args.model:\n                dec_inp = torch.zeros_like(batch_y[:, -args.pred_len:, :]).float()\n                dec_inp = torch.cat([batch_y[:, :args.label_len, :], dec_inp], dim=1).float().to(device)\n                outputs = model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n            else:\n                outputs = model(batch_x, ii)\n            outputs = outputs[:, -args.pred_len:, :]\n            batch_y = batch_y[:, -args.pred_len:, :].to(device)\n            loss = criterion(outputs, batch_y)\n            if args.model == 'GPT4TS_multi' or args.model == 'TEMPO_t5':\n                if not args.no_stl_loss:\n                    loss += args.stl_weight * loss_local\n            train_loss.append(loss.item())\n\n            if (i + 1) % 1000 == 0:\n                print(\"\\titers: {0}, epoch: {1} | loss: {2:.7f}\".format(i + 1, epoch + 1, loss.item()))\n                speed = (time.time() - time_now) / iter_count\n                left_time = speed * ((args.train_epochs - epoch) * train_steps - i)\n                print('\\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(speed, left_time))\n                iter_count = 0\n                time_now = time.time()\n            loss.backward()\n            model_optim.step()\n\n        print(\"Epoch: {} cost time: {}\".format(epoch + 1, time.time() - epoch_time))\n\n        train_loss = np.average(train_loss)\n        vali_loss = vali(model, vali_data, vali_loader, criterion, args, device, ii)\n\n        print(\"Epoch: {0}, Steps: {1} | Train Loss: {2:.7f} Vali Loss: {3:.7f}\".format(\n            epoch + 1, train_steps, train_loss, vali_loss))\n\n        if args.cos:\n            scheduler.step()\n    
        print(\"lr = {:.10f}\".format(model_optim.param_groups[0]['lr']))\n        else:\n            adjust_learning_rate(model_optim, epoch + 1, args)\n        early_stopping(vali_loss, model, path)\n        if early_stopping.early_stop:\n            print(\"Early stopping\")\n            break\n\n    best_model_path = path + '/' + 'checkpoint.pth'\n    model.load_state_dict(torch.load(best_model_path), strict=False)\n    print(\"------------------------------------\")\n    test_data, test_loader = data_provider(args, 'test')\n    mse, mae = test(model, test_data, test_loader, args, device, ii)\n    torch.cuda.empty_cache()\n    print('test on the ' + str(args.target_data) + ' dataset: mse:' + str(mse) + ' mae:' + str(mae))\n\n    end_result = {}\n    end_result['dataset'] = args.data_path\n    end_result['pred_len'] = args.pred_len\n    end_result['random_seed'] = args.random_seed\n    end_result['MSE'] = mse\n    end_result['MAE'] = mae\n\n    import pandas as pd\n\n    # 指定保存路径\n    save_path = args.save_dir + args.save_csv_name\n\n    # 转换字典为 DataFrame\n    df_new = pd.DataFrame([end_result])\n\n    # 检查文件是否存在\n    if os.path.exists(save_path):\n        # 文件存在，读取现有数据\n        df_existing = pd.read_csv(save_path, index_col=0)\n        # 将新数据附加到现有数据框中\n        df_combined = pd.concat([df_existing, df_new], ignore_index=True)\n    else:\n        # 文件不存在，创建新的数据框\n        df_combined = df_new\n\n    # 保存 DataFrame 为 CSV 文件\n    df_combined.to_csv(save_path, index=True, index_label=\"id\")\n\n    print(\"Save success!!!\")\n\n    mses.append(mse)\n    maes.append(mae)\nprint(\"mse_mean = {:.4f}, mse_std = {:.4f}\".format(np.mean(mses), np.std(mses)))\nprint(\"mae_mean = {:.4f}, mae_std = {:.4f}\".format(np.mean(maes), np.std(maes)))\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/train_timesnet.py",
    "content": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\nimport argparse\nimport os\nimport torch\nfrom Other_baselines.exp.exp_long_term_forecasting import Exp_Long_Term_Forecast\nfrom Other_baselines.exp.exp_short_term_forecasting import Exp_Short_Term_Forecast\nfrom utils.print_args import print_args\nimport random\nimport numpy as np\n\nif __name__ == '__main__':\n\n    parser = argparse.ArgumentParser(description='DLinear')  ### DLinear iTransformer TimesNet Informer Autoformer\n\n    # basic config\n    parser.add_argument('--random_seed', type=int, default=42, help='random seed')\n    parser.add_argument('--task_name', type=str, required=False, default='long_term_forecast',\n                        help='task name, options:[long_term_forecast, short_term_forecast, imputation, classification, anomaly_detection]')\n    parser.add_argument('--is_training', type=int, required=False, default=1, help='status')\n    parser.add_argument('--model_id', type=str, required=False, default='test', help='model id')\n    parser.add_argument('--model', type=str, required=False, default='TimesNet',\n                        help='model name, options: [Autoformer, Transformer, TimesNet]')\n\n    # data loader\n    parser.add_argument('--data', type=str, required=False, default='custom', help='dataset type')\n    # parser.add_argument('--root_path', type=str, default='./data/ETT/', help='root path of the data file')\n    parser.add_argument('--root_path', type=str, default='/SSD/lz/ts_forecasting_methods/ts2vec/datasets',\n                        help='root path of the data file')\n    parser.add_argument('--data_path', type=str, default='electricity.csv', help='data file')\n    parser.add_argument('--features', type=str, default='M',\n                        help='forecasting task, options:[M, S, MS]; M:multivariate predict multivariate, S:univariate predict univariate, 
MS:multivariate predict univariate')\n    parser.add_argument('--target', type=str, default='OT', help='target feature in S or MS task')\n    parser.add_argument('--freq', type=str, default='h',\n                        help='freq for time features encoding, options:[s:secondly, t:minutely, h:hourly, d:daily, b:business days, w:weekly, m:monthly], you can also use more detailed freq like 15min or 3h')\n    parser.add_argument('--checkpoints', type=str, default='./checkpoints/', help='location of model checkpoints')\n\n    # forecasting task\n    parser.add_argument('--seq_len', type=int, default=96, help='input sequence length')\n    parser.add_argument('--label_len', type=int, default=48, help='start token length')\n    parser.add_argument('--pred_len', type=int, default=96, help='prediction sequence length')\n    parser.add_argument('--seasonal_patterns', type=str, default='Monthly', help='subset for M4')\n    parser.add_argument('--inverse', action='store_true', help='inverse output data', default=False)\n\n    # inputation task\n    parser.add_argument('--mask_rate', type=float, default=0.25, help='mask ratio')\n\n    # anomaly detection task\n    parser.add_argument('--anomaly_ratio', type=float, default=0.25, help='prior anomaly ratio (%)')\n\n    # model define\n    parser.add_argument('--expand', type=int, default=2, help='expansion factor for Mamba')\n    parser.add_argument('--d_conv', type=int, default=4, help='conv kernel size for Mamba')\n    parser.add_argument('--top_k', type=int, default=5, help='for TimesBlock')\n    parser.add_argument('--num_kernels', type=int, default=6, help='for Inception')\n    parser.add_argument('--enc_in', type=int, default=7, help='encoder input size')\n    parser.add_argument('--dec_in', type=int, default=7, help='decoder input size')\n    parser.add_argument('--c_out', type=int, default=7, help='output size')\n    parser.add_argument('--d_model', type=int, default=512, help='dimension of model')\n    
parser.add_argument('--n_heads', type=int, default=8, help='num of heads')\n    parser.add_argument('--e_layers', type=int, default=2, help='num of encoder layers')\n    parser.add_argument('--d_layers', type=int, default=1, help='num of decoder layers')\n    parser.add_argument('--d_ff', type=int, default=2048, help='dimension of fcn')\n    parser.add_argument('--moving_avg', type=int, default=25, help='window size of moving average')\n    parser.add_argument('--factor', type=int, default=1, help='attn factor')\n    parser.add_argument('--distil', action='store_false',\n                        help='whether to use distilling in encoder, using this argument means not using distilling',\n                        default=True)\n    parser.add_argument('--dropout', type=float, default=0.1, help='dropout')\n    parser.add_argument('--embed', type=str, default='timeF',\n                        help='time features encoding, options:[timeF, fixed, learned]')\n    parser.add_argument('--activation', type=str, default='gelu', help='activation')\n    parser.add_argument('--output_attention', action='store_true', help='whether to output attention in ecoder')\n    parser.add_argument('--channel_independence', type=int, default=1,\n                        help='0: channel dependence 1: channel independence for FreTS model')\n    parser.add_argument('--decomp_method', type=str, default='moving_avg',\n                        help='method of series decompsition, only support moving_avg or dft_decomp')\n    parser.add_argument('--use_norm', type=int, default=1, help='whether to use normalize; True 1 False 0')\n    parser.add_argument('--down_sampling_layers', type=int, default=0, help='num of down sampling layers')\n    parser.add_argument('--down_sampling_window', type=int, default=1, help='down sampling window size')\n    parser.add_argument('--down_sampling_method', type=str, default=None,\n                        help='down sampling method, only support avg, max, conv')\n    
parser.add_argument('--seg_len', type=int, default=48,\n                        help='the length of segmen-wise iteration of SegRNN')\n\n    # optimization\n    parser.add_argument('--num_workers', type=int, default=10, help='data loader num workers')\n    parser.add_argument('--itr', type=int, default=1, help='experiments times')\n    parser.add_argument('--train_epochs', type=int, default=10, help='train epochs')\n    parser.add_argument('--batch_size', type=int, default=8, help='batch size of train input data')\n    parser.add_argument('--patience', type=int, default=3, help='early stopping patience')\n    parser.add_argument('--learning_rate', type=float, default=0.0001, help='optimizer learning rate')\n    parser.add_argument('--des', type=str, default='test', help='exp description')\n    parser.add_argument('--loss', type=str, default='MSE', help='loss function')\n    parser.add_argument('--lradj', type=str, default='type1', help='adjust learning rate')\n    parser.add_argument('--use_amp', action='store_true', help='use automatic mixed precision training', default=False)\n\n    # GPU\n    parser.add_argument('--use_gpu', type=bool, default=True, help='use gpu')\n    parser.add_argument('--gpu', type=int, default=0, help='gpu')\n    parser.add_argument('--use_multi_gpu', help='use multiple gpus', default=False)  ## action='store_true',\n    parser.add_argument('--devices', type=str, default='0,1,2,3', help='device ids of multile gpus')\n\n    # de-stationary projector params\n    parser.add_argument('--p_hidden_dims', type=int, nargs='+', default=[128, 128],\n                        help='hidden layer dimensions of projector (List)')\n    parser.add_argument('--p_hidden_layers', type=int, default=2, help='number of hidden layers in projector')\n\n    # metrics (dtw)\n    parser.add_argument('--use_dtw', type=bool, default=False,\n                        help='the controller of using dtw metric (dtw is time consuming, not suggested unless necessary)')\n\n    # 
Augmentation\n    parser.add_argument('--augmentation_ratio', type=int, default=0, help=\"How many times to augment\")\n    parser.add_argument('--seed', type=int, default=2, help=\"Randomization seed\")\n    parser.add_argument('--jitter', default=False, action=\"store_true\", help=\"Jitter preset augmentation\")\n    parser.add_argument('--scaling', default=False, action=\"store_true\", help=\"Scaling preset augmentation\")\n    parser.add_argument('--permutation', default=False, action=\"store_true\",\n                        help=\"Equal Length Permutation preset augmentation\")\n    parser.add_argument('--randompermutation', default=False, action=\"store_true\",\n                        help=\"Random Length Permutation preset augmentation\")\n    parser.add_argument('--magwarp', default=False, action=\"store_true\", help=\"Magnitude warp preset augmentation\")\n    parser.add_argument('--timewarp', default=False, action=\"store_true\", help=\"Time warp preset augmentation\")\n    parser.add_argument('--windowslice', default=False, action=\"store_true\", help=\"Window slice preset augmentation\")\n    parser.add_argument('--windowwarp', default=False, action=\"store_true\", help=\"Window warp preset augmentation\")\n    parser.add_argument('--rotation', default=False, action=\"store_true\", help=\"Rotation preset augmentation\")\n    parser.add_argument('--spawner', default=False, action=\"store_true\", help=\"SPAWNER preset augmentation\")\n    parser.add_argument('--dtwwarp', default=False, action=\"store_true\", help=\"DTW warp preset augmentation\")\n    parser.add_argument('--shapedtwwarp', default=False, action=\"store_true\", help=\"Shape DTW warp preset augmentation\")\n    parser.add_argument('--wdba', default=False, action=\"store_true\", help=\"Weighted DBA preset augmentation\")\n    parser.add_argument('--discdtw', default=False, action=\"store_true\",\n                        help=\"Discrimitive DTW warp preset augmentation\")\n    
parser.add_argument('--discsdtw', default=False, action=\"store_true\",\n                        help=\"Discrimitive shapeDTW warp preset augmentation\")\n    parser.add_argument('--extra_tag', type=str, default=\"\", help=\"Anything extra\")\n\n    parser.add_argument('--save_dir', type=str, default='/SSD/lz/ts_forecasting_methods/result/')\n    parser.add_argument('--save_csv_name', type=str, default='timesnet_forecasting_0730.csv')\n\n    args = parser.parse_args()\n    # args.use_gpu = True if torch.cuda.is_available() and args.use_gpu else False\n    args.use_gpu = True if torch.cuda.is_available() else False\n\n    fix_seed = args.random_seed\n    random.seed(fix_seed)\n    torch.manual_seed(fix_seed)\n    np.random.seed(fix_seed)\n\n    # 检查路径是否存在，如果不存在则赋值为新的路径\n    if not os.path.exists(args.root_path):\n        args.root_path = '/dev_data/lz/ts_forecasting_methods/ts2vec/datasets'\n        args.save_dir = '/dev_data/lz/ts_forecasting_methods/result/'\n\n    print(\"root_path = \", args.root_path)  # 输出检查\n\n    from Other_baselines.data_provider.data_factory import data_provider\n\n    train_data, train_loader = data_provider(args, 'train')\n\n    print(\"train_data.shape = \", train_data.data_x.shape)\n    args.enc_in = train_data.data_x.shape[-1]\n    args.dec_in = train_data.data_x.shape[-1]\n    args.c_out = train_data.data_x.shape[-1]\n\n    print(torch.cuda.is_available())\n\n    if args.use_gpu and args.use_multi_gpu:\n        args.devices = args.devices.replace(' ', '')\n        device_ids = args.devices.split(',')\n        args.device_ids = [int(id_) for id_ in device_ids]\n        args.gpu = args.device_ids[0]\n\n    print('Args in experiment:')\n    print_args(args)\n\n    if args.task_name == 'long_term_forecast':\n        Exp = Exp_Long_Term_Forecast\n    elif args.task_name == 'short_term_forecast':\n        Exp = Exp_Short_Term_Forecast\n    else:\n        Exp = Exp_Long_Term_Forecast\n\n    if args.is_training:\n        for ii in 
range(args.itr):\n            # setting record of experiments\n            exp = Exp(args)  # set experiments\n            setting = '{}_{}_{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_expand{}_dc{}_fc{}_eb{}_dt{}_{}_{}'.format(\n                args.task_name,\n                args.model_id,\n                args.model,\n                args.data,\n                args.features,\n                args.seq_len,\n                args.label_len,\n                args.pred_len,\n                args.d_model,\n                args.n_heads,\n                args.e_layers,\n                args.d_layers,\n                args.d_ff,\n                args.expand,\n                args.d_conv,\n                args.factor,\n                args.embed,\n                args.distil,\n                args.des, ii)\n\n            print('>>>>>>>start training : {}>>>>>>>>>>>>>>>>>>>>>>>>>>'.format(setting))\n            exp.train(setting)\n\n            print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))\n            mae, mse, rmse, mape, mspe = exp.test(setting)\n            # torch.cuda.empty_cache()\n            print(\"Success mae, mse = \", mae, mse)\n\n            end_result = {}\n            end_result['dataset'] = args.data_path\n            end_result['pred_len'] = args.pred_len\n            end_result['random_seed'] = args.random_seed\n            end_result['MSE'] = mse\n            end_result['MAE'] = mae\n\n            import pandas as pd\n\n            # 指定保存路径\n            save_path = args.save_dir + args.save_csv_name\n\n            # 转换字典为 DataFrame\n            df_new = pd.DataFrame([end_result])\n\n            # 检查文件是否存在\n            if os.path.exists(save_path):\n                # 文件存在，读取现有数据\n                df_existing = pd.read_csv(save_path, index_col=0)\n                # 将新数据附加到现有数据框中\n                df_combined = pd.concat([df_existing, df_new], ignore_index=True)\n            else:\n                # 文件不存在，创建新的数据框\n     
           df_combined = df_new\n\n            # 保存 DataFrame 为 CSV 文件\n            df_combined.to_csv(save_path, index=True, index_label=\"id\")\n\n            print(\"Save success!!!\")\n\n\n    else:\n        ii = 0\n        setting = '{}_{}_{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_expand{}_dc{}_fc{}_eb{}_dt{}_{}_{}'.format(\n            args.task_name,\n            args.model_id,\n            args.model,\n            args.data,\n            args.features,\n            args.seq_len,\n            args.label_len,\n            args.pred_len,\n            args.d_model,\n            args.n_heads,\n            args.e_layers,\n            args.d_layers,\n            args.d_ff,\n            args.expand,\n            args.d_conv,\n            args.factor,\n            args.embed,\n            args.distil,\n            args.des, ii)\n\n        exp = Exp(args)  # set experiments\n        print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))\n        exp.test(setting, test=1)\n        # torch.cuda.empty_cache()\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/train_ts2vec.py",
    "content": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\nimport torch\nimport numpy as np\nimport argparse\nimport os\nimport random\nimport time\nimport datetime\nfrom ts2vec.ts2vec import TS2Vec\nimport  ts2vec.tasks as tasks\nimport  ts2vec.datautils as datautils\nfrom ts2vec.utils import init_dl_program, name_with_datetime, pkl_save, data_dropout\nfrom Other_baselines.data_provider.data_factory import data_provider\nfrom ts2vec.tasks import _eval_protocols as eval_protocols\n\n\ndef generate_pred_samples(features, data, pred_len, drop=0):\n    n = data.shape[1]\n    features = features[:, :-pred_len]\n    labels = np.stack([ data[:, i:1+n+i-pred_len] for i in range(pred_len)], axis=2)[:, 1:]\n    features = features[:, drop:]\n    labels = labels[:, drop:]\n    return features.reshape(-1, features.shape[-1]), \\\n            labels.reshape(-1, labels.shape[2]*labels.shape[3])\n\n\ndef cal_metrics(pred, target):\n    return {\n        'MSE': ((pred - target) ** 2).mean(),\n        'MAE': np.abs(pred - target).mean()\n    }\n\n\ndef eval_forecasting_new(model, train_data, valid_data, test_data, pred_lens):\n    padding = 200\n\n    t = time.time()\n    train_repr = model.encode(\n        train_data,\n        casual=True,\n        sliding_length=1,\n        sliding_padding=padding,\n        batch_size=32\n    )\n    valid_repr = model.encode(\n        valid_data,\n        casual=True,\n        sliding_length=1,\n        sliding_padding=padding,\n        batch_size=32\n    )\n    test_repr = model.encode(\n        test_data,\n        casual=True,\n        sliding_length=1,\n        sliding_padding=padding,\n        batch_size=32\n    )\n    ts2vec_infer_time = time.time() - t\n\n    print(\"train_data.shape = \", train_data.shape)\n\n    # train_repr = all_repr[:, train_slice]\n    # valid_repr = all_repr[:, valid_slice]\n    # test_repr = all_repr[:, 
test_slice]\n    # print(\"train_repr.shape = \", train_repr.shape, \", valid_repr.shape = \", valid_repr.shape, \", test_repr.shape = \",\n    #       test_repr.shape)\n    #\n    # print(\"n_covariate_cols = \", n_covariate_cols)\n    #\n    # train_data = data[:, train_slice, n_covariate_cols:]\n    # valid_data = data[:, valid_slice, n_covariate_cols:]\n    # test_data = data[:, test_slice, n_covariate_cols:]\n\n    print(\"train_data.shape = \", train_data.shape, \", valid_data.shape = \", valid_data.shape, \", test_data.shape = \",\n          test_data.shape)\n\n    ours_result = {}\n    lr_train_time = {}\n    lr_infer_time = {}\n    out_log = {}\n    for pred_len in pred_lens:\n        train_features, train_labels = generate_pred_samples(train_repr, train_data, pred_len, drop=padding)\n        valid_features, valid_labels = generate_pred_samples(valid_repr, valid_data, pred_len)\n        test_features, test_labels = generate_pred_samples(test_repr, test_data, pred_len)\n\n        t = time.time()\n        lr = eval_protocols.fit_ridge(train_features, train_labels, valid_features, valid_labels)\n        lr_train_time[pred_len] = time.time() - t\n\n        t = time.time()\n        test_pred = lr.predict(test_features)\n        lr_infer_time[pred_len] = time.time() - t\n\n        ori_shape = test_data.shape[0], -1, pred_len, test_data.shape[2]\n        test_pred = test_pred.reshape(ori_shape)\n        test_labels = test_labels.reshape(ori_shape)\n\n        # if test_data.shape[0] > 1:\n        #     test_pred_inv = scaler.inverse_transform(test_pred.swapaxes(0, 3)).swapaxes(0, 3)\n        #     test_labels_inv = scaler.inverse_transform(test_labels.swapaxes(0, 3)).swapaxes(0, 3)\n        # else:\n        #     print(\"test_pred.shape = \", test_pred.shape, \", test_labels.shape = \", test_labels.shape)\n        #     print(\"test_pred.swapaxes(0, 3).shape = \", test_pred.swapaxes(0, 3).shape)\n        #     test_pred_inv = scaler.inverse_transform(test_pred)\n  
      #     test_labels_inv = scaler.inverse_transform(test_labels)\n\n        out_log[pred_len] = {\n            'norm': test_pred,\n            # 'raw': test_pred_inv,\n            'norm_gt': test_labels,\n            # 'raw_gt': test_labels_inv\n        }\n        ours_result[pred_len] = {\n            'norm': cal_metrics(test_pred, test_labels),\n            # 'raw': cal_metrics(test_pred_inv, test_labels_inv)\n        }\n\n    eval_res = {\n        'ours': ours_result,\n        'ts2vec_infer_time': ts2vec_infer_time,\n        'lr_train_time': lr_train_time,\n        'lr_infer_time': lr_infer_time\n    }\n    return out_log, eval_res\n\n\ndef save_checkpoint_callback(\n        save_every=1,\n        unit='epoch'\n):\n    assert unit in ('epoch', 'iter')\n\n    def callback(model, loss):\n        n = model.n_epochs if unit == 'epoch' else model.n_iters\n        if n % save_every == 0:\n            model.save(f'{run_dir}/model_{n}.pkl')\n\n    return callback\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    # parser.add_argument('dataset', help='The dataset name')\n    parser.add_argument('--random_seed', type=int, default=42, help='random seed')\n    parser.add_argument('--dataset', default='traffic',\n                        help='The dataset name')  ## 'ETTh1', 'ETTh2', 'electricity'  ETTm1\n    # parser.add_argument('run_name', help='The folder name used to save model, output and evaluation metrics. This can be set to any word')\n    parser.add_argument('--run_name', default='ts2Vec',\n                        help='The folder name used to save model, output and evaluation metrics. This can be set to any word')\n    # parser.add_argument('--loader', type=str, required=True, help='The data loader used to load the experimental data. 
This can be set to UCR, '\n    #                                                               'UEA, forecast_csv, forecast_csv_univar, anomaly, or anomaly_coldstart')\n    parser.add_argument('--loader', type=str, default='forecast_csv',\n                        help='The data loader used to load the experimental data.')  ## forecast_csv forecast_csv_univar\n    parser.add_argument('--gpu', type=int, default=0,\n                        help='The gpu no. used for training and inference (defaults to 0)')\n    parser.add_argument('--batch-size', type=int, default=8, help='The batch size (defaults to 8)')\n    parser.add_argument('--lr', type=float, default=0.001, help='The learning rate (defaults to 0.001)')\n    parser.add_argument('--repr-dims', type=int, default=320, help='The representation dimension (defaults to 320)')\n    parser.add_argument('--max-train-length', type=int, default=3000,\n                        help='For sequence with a length greater than <max_train_length>, it would be cropped into some sequences, each of which has a length less than <max_train_length> (defaults to 3000)')\n    parser.add_argument('--iters', type=int, default=None, help='The number of iterations')\n    parser.add_argument('--epochs', type=int, default=None, help='The number of epochs')\n    parser.add_argument('--save-every', type=int, default=None,\n                        help='Save the checkpoint every <save_every> iterations/epochs')\n    parser.add_argument('--seed', type=int, default=None, help='The random seed')\n    parser.add_argument('--max-threads', type=int, default=6,\n                        help='The maximum allowed number of threads used by this process')\n    # parser.add_argument('--eval', action=\"store_true\", help='Whether to perform evaluation after training')\n    parser.add_argument('--eval', default=True,\n                        help='Whether to perform evaluation after training')  ## action=\"store_true\"\n    parser.add_argument('--irregular', 
type=float, default=0, help='The ratio of missing observations (defaults to 0)')\n\n    parser.add_argument('--save_dir', type=str, default='/dev_data/lz/ts_forecasting_methods/result/')\n    parser.add_argument('--save_csv_name', type=str, default='ts2vec_forecasting_0730.csv')\n\n\n\n\n    parser.add_argument('--task_name', type=str, default='long_term_forecast',\n                        help='task name, options:[long_term_forecast, short_term_forecast, imputation, classification, anomaly_detection]')\n    parser.add_argument('--data', type=str, default='custom', help='dataset type')\n    parser.add_argument('--root_path', type=str, default='/dev_data/lz/ts_forecasting_methods/ts2vec/datasets',\n                        help='root path of the data file')\n    parser.add_argument('--data_path', type=str, default='traffic.csv', help='data file')\n    parser.add_argument('--features', type=str, default='M',\n                        help='forecasting task, options:[M, S, MS]; M:multivariate predict multivariate, S:univariate predict univariate, MS:multivariate predict univariate')\n    parser.add_argument('--target', type=str, default='OT', help='target feature in S or MS task')\n    parser.add_argument('--freq', type=str, default='h',\n                        help='freq for time features encoding, options:[s:secondly, t:minutely, h:hourly, d:daily, b:business days, w:weekly, m:monthly], you can also use more detailed freq like 15min or 3h')\n    parser.add_argument('--checkpoints', type=str, default='./checkpoints/', help='location of model checkpoints')\n    parser.add_argument('--embed', type=str, default='timeF',\n                        help='time features encoding, options:[timeF, fixed, learned]')\n    parser.add_argument('--seq_len', type=int, default=96, help='input sequence length')\n    parser.add_argument('--label_len', type=int, default=48, help='start token length')\n    parser.add_argument('--pred_len', type=int, default=96, help='prediction sequence 
length')\n    parser.add_argument('--seasonal_patterns', type=str, default='Monthly',\n                        help='subset for M4')  ## Hourly Daily Weekly Monthly Quarterly Yearly\n    parser.add_argument('--inverse', action='store_true', help='inverse output data', default=False)\n    parser.add_argument('--augmentation_ratio', type=int, default=0, help=\"How many times to augment\")\n    parser.add_argument('--num_workers', type=int, default=2, help='data loader num workers')\n\n    args = parser.parse_args()\n\n    fix_seed = args.random_seed\n    random.seed(fix_seed)\n    torch.manual_seed(fix_seed)\n    np.random.seed(fix_seed)\n\n    print(\"Dataset:\", args.dataset)\n    print(\"Arguments:\", str(args))\n\n    # 检查路径是否存在，如果不存在则赋值为新的路径\n    if not os.path.exists(args.save_dir):\n        args.save_dir = '/SSD/lz/ts_forecasting_methods/result/'\n\n    print(\"save_dir = \", args.save_dir)  # 输出检查\n\n    device = init_dl_program(args.gpu, seed=args.seed, max_threads=args.max_threads)\n\n    print('Loading data... 
', end='')\n    if args.loader == 'UCR':\n        task_type = 'classification'\n        train_data, train_labels, test_data, test_labels = datautils.load_UCR(args.dataset)\n\n    elif args.loader == 'UEA':\n        task_type = 'classification'\n        train_data, train_labels, test_data, test_labels = datautils.load_UEA(args.dataset)\n\n    elif args.loader == 'forecast_csv':\n        task_type = 'forecasting'\n        # data, train_slice, valid_slice, test_slice, scaler, pred_lens, n_covariate_cols = datautils.load_forecast_csv(\n        #     args.dataset)\n        # train_data = data[:, train_slice]\n        #\n        # _train_data = data[:, train_slice, n_covariate_cols:]\n        # valid_data = data[:, valid_slice, n_covariate_cols:]\n        # test_data = data[:, test_slice, n_covariate_cols:]\n        #\n        #\n        #\n        #\n        # print(\"data.shape = \", data.shape)\n        #\n        # print(\"train_slice = \", train_slice, \", valid_slice = \", valid_slice, \", test_slice = \", test_slice)\n        #\n        # print(\"train_data.shape = \", _train_data.shape, \", valid_data.shape = \", valid_data.shape,\n        #       \", test_data.shape = \",\n        #       test_data.shape)\n\n        train_data, train_loader = data_provider(args, 'train')\n        vali_data, vali_loader = data_provider(args, 'val')\n        test_data, test_loader = data_provider(args, 'test')\n\n        print(\"dataset name = \", args.data_path)\n\n        print(\"type train_data = \", type(train_data))\n\n        print(\"train_data = \", train_data)\n        print(train_data.data_x.shape, train_data.data_y.shape)\n\n        print(\"train_data = \", train_data)\n        print(vali_data.data_x.shape, vali_data.data_y.shape)\n\n        print(\"train_data = \", train_data)\n        print(test_data.data_x.shape, test_data.data_y.shape)\n\n        new_train_data = train_data.data_x[np.newaxis, :, :]\n        new_vali_data = vali_data.data_x[np.newaxis, :, :]\n        
new_test_data = test_data.data_x[np.newaxis, :, :]\n\n        print(\"new_train_data = \", new_train_data.shape, new_vali_data.shape, new_test_data.shape)\n\n\n\n\n    elif args.loader == 'forecast_csv_univar':\n        task_type = 'forecasting'\n        data, train_slice, valid_slice, test_slice, scaler, pred_lens, n_covariate_cols = datautils.load_forecast_csv(\n            args.dataset, univar=True)\n        train_data = data[:, train_slice]\n        print(\"raw data.shape = \", data.shape, \", train_data.shape = \", train_data.shape)\n\n    elif args.loader == 'forecast_npy':\n        task_type = 'forecasting'\n        data, train_slice, valid_slice, test_slice, scaler, pred_lens, n_covariate_cols = datautils.load_forecast_npy(\n            args.dataset)\n        train_data = data[:, train_slice]\n\n\n\n    elif args.loader == 'forecast_npy_univar':\n        task_type = 'forecasting'\n        data, train_slice, valid_slice, test_slice, scaler, pred_lens, n_covariate_cols = datautils.load_forecast_npy(\n            args.dataset, univar=True)\n        train_data = data[:, train_slice]\n\n    elif args.loader == 'anomaly':\n        task_type = 'anomaly_detection'\n        all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay = datautils.load_anomaly(\n            args.dataset)\n        train_data = datautils.gen_ano_train_data(all_train_data)\n\n    elif args.loader == 'anomaly_coldstart':\n        task_type = 'anomaly_detection_coldstart'\n        all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay = datautils.load_anomaly(\n            args.dataset)\n        train_data, _, _, _ = datautils.load_UCR('FordA')\n\n    else:\n        raise ValueError(f\"Unknown loader {args.loader}.\")\n\n    if args.irregular > 0:\n        if task_type == 'classification':\n            train_data = data_dropout(train_data, args.irregular)\n            test_data 
= data_dropout(test_data, args.irregular)\n        else:\n            raise ValueError(f\"Task type {task_type} is not supported when irregular>0.\")\n    print('done')\n    print(\"output_dims=args.repr_dims = \", args.repr_dims, \", input_dims = \", new_train_data.shape[-1])\n    config = dict(\n        batch_size=args.batch_size,\n        lr=args.lr,\n        output_dims=args.repr_dims,\n        max_train_length=args.max_train_length\n    )\n\n    if args.save_every is not None:\n        unit = 'epoch' if args.epochs is not None else 'iter'\n        config[f'after_{unit}_callback'] = save_checkpoint_callback(args.save_every, unit)\n\n    run_dir = 'training/' + args.dataset + '__' + name_with_datetime(args.run_name)\n    os.makedirs(run_dir, exist_ok=True)\n\n    t = time.time()\n\n    model = TS2Vec(\n        input_dims=new_train_data.shape[-1],\n        device=device,\n        **config\n    )\n    loss_log = model.fit(\n        new_train_data,\n        n_epochs=args.epochs,\n        n_iters=args.iters,\n        verbose=True\n    )\n    model.save(f'{run_dir}/model.pkl')\n\n    t = time.time() - t\n    print(f\"\\nTraining time: {datetime.timedelta(seconds=t)}\\n\")\n\n    if args.eval:\n        if task_type == 'classification':\n            out, eval_res = tasks.eval_classification(model, train_data, train_labels, test_data, test_labels,\n                                                      eval_protocol='svm')\n        elif task_type == 'forecasting':\n            print(\"\")\n            # out, eval_res = tasks.eval_forecasting(model, data, train_slice, valid_slice, test_slice, scaler, pred_lens,\n            #                                        n_covariate_cols)\n            pred_lens = [96, 192, 336, 720]\n            if args.dataset == 'national_illness':\n                pred_lens = [24, 36, 48, 60]\n            out, eval_res = eval_forecasting_new(model, new_train_data, new_vali_data, new_test_data, pred_lens)\n\n            # print(\"ts2vec out = 
\", out)\n            print(\"ts2vec eval_res = \", eval_res)\n\n            end_result = {}\n            end_result['dataset'] = args.dataset\n            end_result['random_seed'] = args.random_seed\n            for _pred in pred_lens:\n                _MSE = str(_pred) + \"_MSE\"\n                end_result[_MSE] = eval_res['ours'][_pred]['norm']['MSE']\n                _MAE = str(_pred) + \"_MAE\"\n                end_result[_MAE] = eval_res['ours'][_pred]['norm']['MAE']\n\n\n\n\n            # end_result['24_MSE'] = eval_res['ours'][24]['norm']['MSE']\n            # end_result['24_MAE'] = eval_res['ours'][24]['norm']['MAE']\n            #\n            # end_result['48_MSE'] = eval_res['ours'][48]['norm']['MSE']\n            # end_result['48_MAE'] = eval_res['ours'][48]['norm']['MAE']\n            #\n            # if args.dataset == 'ETTm1':\n            #     end_result['168_MSE'] = eval_res['ours'][96]['norm']['MSE']\n            #     end_result['168_MAE'] = eval_res['ours'][96]['norm']['MAE']\n            #\n            #     end_result['336_MSE'] = eval_res['ours'][288]['norm']['MSE']\n            #     end_result['336_MAE'] = eval_res['ours'][288]['norm']['MAE']\n            #\n            #     end_result['720_MSE'] = eval_res['ours'][672]['norm']['MSE']\n            #     end_result['720_MAE'] = eval_res['ours'][672]['norm']['MAE']\n            # else:\n            #\n            #     end_result['168_MSE'] = eval_res['ours'][168]['norm']['MSE']\n            #     end_result['168_MAE'] = eval_res['ours'][168]['norm']['MAE']\n            #\n            #     end_result['336_MSE'] = eval_res['ours'][336]['norm']['MSE']\n            #     end_result['336_MAE'] = eval_res['ours'][336]['norm']['MAE']\n            #\n            #     end_result['720_MSE'] = eval_res['ours'][720]['norm']['MSE']\n            #     end_result['720_MAE'] = eval_res['ours'][720]['norm']['MAE']\n\n            import pandas as pd\n\n            # 转换字典为 DataFrame\n            # df = 
pd.DataFrame([eval_res])\n            # 指定保存路径\n            save_path = args.save_dir + args.save_csv_name\n\n            # 转换字典为 DataFrame\n            df_new = pd.DataFrame([end_result])\n\n            # 检查文件是否存在\n            if os.path.exists(save_path):\n                # 文件存在，读取现有数据\n                df_existing = pd.read_csv(save_path, index_col=0)\n                # 将新数据附加到现有数据框中\n                df_combined = pd.concat([df_existing, df_new], ignore_index=True)\n            else:\n                # 文件不存在，创建新的数据框\n                df_combined = df_new\n\n            # 保存 DataFrame 为 CSV 文件\n            df_combined.to_csv(save_path, index=True, index_label=\"id\")\n\n            print(\"Save success!!!\")\n\n\n\n\n\n        elif task_type == 'anomaly_detection':\n            out, eval_res = tasks.eval_anomaly_detection(model, all_train_data, all_train_labels, all_train_timestamps,\n                                                         all_test_data, all_test_labels, all_test_timestamps, delay)\n        elif task_type == 'anomaly_detection_coldstart':\n            out, eval_res = tasks.eval_anomaly_detection_coldstart(model, all_train_data, all_train_labels,\n                                                                   all_train_timestamps, all_test_data, all_test_labels,\n                                                                   all_test_timestamps, delay)\n        else:\n            assert False\n        pkl_save(f'{run_dir}/out.pkl', out)\n        pkl_save(f'{run_dir}/eval_res.pkl', eval_res)\n        print('Evaluation result:', eval_res)\n\n    print(\"Finished.\")\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/utils/ADFtest.py",
    "content": "import pandas as pd\nimport numpy as np\nimport os\nfrom statsmodels.tsa.stattools import adfuller\nfrom arch.unitroot import ADF\n\ndef calculate_ADF(root_path,data_path):\n    df_raw = pd.read_csv(os.path.join(root_path,data_path))\n    cols = list(df_raw.columns)\n    cols.remove('date')\n    df_raw = df_raw[cols]\n    adf_list = []\n    for i in cols:\n        df_data = df_raw[i]\n        adf = adfuller(df_data, maxlag = 1)\n        print(adf)\n        adf_list.append(adf)\n    return np.array(adf_list)\n\ndef calculate_target_ADF(root_path,data_path,target='OT'):\n    df_raw = pd.read_csv(os.path.join(root_path,data_path))\n    target_cols = target.split(',')\n    # df_data = df_raw[target]\n    df_raw = df_raw[target_cols]\n    adf_list = []\n    for i in target_cols:\n        df_data = df_raw[i]\n        adf = adfuller(df_data, maxlag = 1)\n        # print(adf)\n        adf_list.append(adf)\n    return np.array(adf_list)\n\ndef archADF(root_path, data_path):\n    df = pd.read_csv(os.path.join(root_path,data_path))\n    cols = df.columns[1:]\n    stats = 0\n    for target_col in cols:\n        series = df[target_col].values\n        adf = ADF(series)\n        stat = adf.stat\n        stats += stat\n    return stats/len(cols)\n\nif __name__ == '__main__':\n\n    # * Exchange - result: -1.902402344564288 | report: -1.889\n    ADFmetric = archADF(root_path=\"./dataset/exchange_rate/\",data_path=\"exchange_rate.csv\")\n    print(\"Exchange ADF metric\", ADFmetric)\n\n    # * Illness - result: -5.33416661870624 | report: -5.406\n    ADFmetric = archADF(root_path=\"./dataset/illness/\",data_path=\"national_illness.csv\") \n    print(\"Illness ADF metric\", ADFmetric)\n\n    # * ETTm2 - result: -5.663628743471695 | report: -6.225\n    ADFmetric = archADF(root_path=\"./dataset/ETT-small/\",data_path=\"ETTm2.csv\")\n    print(\"ETTm2 ADF metric\", ADFmetric)\n\n    # * Electricity - result: -8.44485821939281 | report: -8.483\n    ADFmetric = 
archADF(root_path=\"./dataset/electricity/\",data_path=\"electricity.csv\")\n    print(\"Electricity ADF metric\", ADFmetric)\n\n    # * Traffic - result: -15.020978067839014 | report: -15.046\n    ADFmetric = archADF(root_path=\"./dataset/traffic/\",data_path=\"traffic.csv\")\n    print(\"Traffic ADF metric\", ADFmetric)\n\n    # * Weather - result: -26.681433085204866 | report: -26.661\n    ADFmetric = archADF(root_path=\"./dataset/weather/\",data_path=\"weather.csv\")\n    print(\"Weather ADF metric\", ADFmetric)\n\n\n    # print(ADFmetric)\n\n    # mean_ADFmetric = ADFmetric[:,0].mean()\n    # print(mean_ADFmetric)"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/utils/__init__.py",
    "content": ""
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/utils/augmentation.py",
    "content": "import numpy as np\nfrom tqdm import tqdm\n\ndef jitter(x, sigma=0.03):\n    # https://arxiv.org/pdf/1706.00527.pdf\n    return x + np.random.normal(loc=0., scale=sigma, size=x.shape)\n\n\ndef scaling(x, sigma=0.1):\n    # https://arxiv.org/pdf/1706.00527.pdf\n    factor = np.random.normal(loc=1., scale=sigma, size=(x.shape[0],x.shape[2]))\n    return np.multiply(x, factor[:,np.newaxis,:])\n\ndef rotation(x):\n    x = np.array(x)\n    flip = np.random.choice([-1, 1], size=(x.shape[0],x.shape[2]))\n    rotate_axis = np.arange(x.shape[2])\n    np.random.shuffle(rotate_axis)\n    return flip[:,np.newaxis,:] * x[:,:,rotate_axis]\n\ndef permutation(x, max_segments=5, seg_mode=\"equal\"):\n    orig_steps = np.arange(x.shape[1])\n    \n    num_segs = np.random.randint(1, max_segments, size=(x.shape[0]))\n    \n    ret = np.zeros_like(x)\n    for i, pat in enumerate(x):\n        if num_segs[i] > 1:\n            if seg_mode == \"random\":\n                split_points = np.random.choice(x.shape[1]-2, num_segs[i]-1, replace=False)\n                split_points.sort()\n                splits = np.split(orig_steps, split_points)\n            else:\n                splits = np.array_split(orig_steps, num_segs[i])\n            warp = np.concatenate(np.random.permutation(splits)).ravel()\n            # ? 
Question: What is the point of making segments?\n            # for i in range(len(splits)):\n            #     permute = np.random.permutation(splits[i])\n\n\n            ret[i] = pat[warp]\n        else:\n            ret[i] = pat\n    return ret\n\ndef magnitude_warp(x, sigma=0.2, knot=4):\n    from scipy.interpolate import CubicSpline\n    orig_steps = np.arange(x.shape[1])\n    \n    random_warps = np.random.normal(loc=1.0, scale=sigma, size=(x.shape[0], knot+2, x.shape[2]))\n    warp_steps = (np.ones((x.shape[2],1))*(np.linspace(0, x.shape[1]-1., num=knot+2))).T\n    ret = np.zeros_like(x)\n    for i, pat in enumerate(x):\n        warper = np.array([CubicSpline(warp_steps[:,dim], random_warps[i,:,dim])(orig_steps) for dim in range(x.shape[2])]).T\n        ret[i] = pat * warper\n\n    return ret\n\ndef time_warp(x, sigma=0.2, knot=4):\n    from scipy.interpolate import CubicSpline\n    orig_steps = np.arange(x.shape[1])\n    \n    random_warps = np.random.normal(loc=1.0, scale=sigma, size=(x.shape[0], knot+2, x.shape[2]))\n    warp_steps = (np.ones((x.shape[2],1))*(np.linspace(0, x.shape[1]-1., num=knot+2))).T\n    \n    ret = np.zeros_like(x)\n    for i, pat in enumerate(x):\n        for dim in range(x.shape[2]):\n            time_warp = CubicSpline(warp_steps[:,dim], warp_steps[:,dim] * random_warps[i,:,dim])(orig_steps)\n            scale = (x.shape[1]-1)/time_warp[-1]\n            ret[i,:,dim] = np.interp(orig_steps, np.clip(scale*time_warp, 0, x.shape[1]-1), pat[:,dim]).T\n    return ret\n\ndef window_slice(x, reduce_ratio=0.9):\n    # https://halshs.archives-ouvertes.fr/halshs-01357973/document\n    target_len = np.ceil(reduce_ratio*x.shape[1]).astype(int)\n    if target_len >= x.shape[1]:\n        return x\n    starts = np.random.randint(low=0, high=x.shape[1]-target_len, size=(x.shape[0])).astype(int)\n    ends = (target_len + starts).astype(int)\n    \n    ret = np.zeros_like(x)\n    for i, pat in enumerate(x):\n        for dim in range(x.shape[2]):\n   
         ret[i,:,dim] = np.interp(np.linspace(0, target_len, num=x.shape[1]), np.arange(target_len), pat[starts[i]:ends[i],dim]).T\n    return ret\n\ndef window_warp(x, window_ratio=0.1, scales=[0.5, 2.]):\n    # https://halshs.archives-ouvertes.fr/halshs-01357973/document\n    warp_scales = np.random.choice(scales, x.shape[0])\n    warp_size = np.ceil(window_ratio*x.shape[1]).astype(int)\n    window_steps = np.arange(warp_size)\n        \n    window_starts = np.random.randint(low=1, high=x.shape[1]-warp_size-1, size=(x.shape[0])).astype(int)\n    window_ends = (window_starts + warp_size).astype(int)\n            \n    ret = np.zeros_like(x)\n    for i, pat in enumerate(x):\n        for dim in range(x.shape[2]):\n            start_seg = pat[:window_starts[i],dim]\n            window_seg = np.interp(np.linspace(0, warp_size-1, num=int(warp_size*warp_scales[i])), window_steps, pat[window_starts[i]:window_ends[i],dim])\n            end_seg = pat[window_ends[i]:,dim]\n            warped = np.concatenate((start_seg, window_seg, end_seg))                \n            ret[i,:,dim] = np.interp(np.arange(x.shape[1]), np.linspace(0, x.shape[1]-1., num=warped.size), warped).T\n    return ret\n\ndef spawner(x, labels, sigma=0.05, verbose=0):\n    # https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6983028/\n    # use verbose=-1 to turn off warnings\n    # use verbose=1 to print out figures\n    \n    import utils.dtw as dtw\n    random_points = np.random.randint(low=1, high=x.shape[1]-1, size=x.shape[0])\n    window = np.ceil(x.shape[1] / 10.).astype(int)\n    orig_steps = np.arange(x.shape[1])\n    l = np.argmax(labels, axis=1) if labels.ndim > 1 else labels\n    \n    ret = np.zeros_like(x)\n    # for i, pat in enumerate(tqdm(x)):\n    for i, pat in enumerate(x):\n        # guarentees that same one isnt selected\n        choices = np.delete(np.arange(x.shape[0]), i)\n        # remove ones of different classes\n        choices = np.where(l[choices] == l[i])[0]\n        if 
choices.size > 0:     \n            random_sample = x[np.random.choice(choices)]\n            # SPAWNER splits the path into two randomly\n            path1 = dtw.dtw(pat[:random_points[i]], random_sample[:random_points[i]], dtw.RETURN_PATH, slope_constraint=\"symmetric\", window=window)\n            path2 = dtw.dtw(pat[random_points[i]:], random_sample[random_points[i]:], dtw.RETURN_PATH, slope_constraint=\"symmetric\", window=window)\n            combined = np.concatenate((np.vstack(path1), np.vstack(path2+random_points[i])), axis=1)\n            if verbose:\n                # print(random_points[i])\n                dtw_value, cost, DTW_map, path = dtw.dtw(pat, random_sample, return_flag = dtw.RETURN_ALL, slope_constraint=slope_constraint, window=window)\n                dtw.draw_graph1d(cost, DTW_map, path, pat, random_sample)\n                dtw.draw_graph1d(cost, DTW_map, combined, pat, random_sample)\n            mean = np.mean([pat[combined[0]], random_sample[combined[1]]], axis=0)\n            for dim in range(x.shape[2]):\n                ret[i,:,dim] = np.interp(orig_steps, np.linspace(0, x.shape[1]-1., num=mean.shape[0]), mean[:,dim]).T\n        else:\n            # if verbose > -1:\n            #     print(\"There is only one pattern of class {}, skipping pattern average\".format(l[i]))\n            ret[i,:] = pat\n    return jitter(ret, sigma=sigma)\n\ndef wdba(x, labels, batch_size=6, slope_constraint=\"symmetric\", use_window=True, verbose=0):\n    # https://ieeexplore.ieee.org/document/8215569\n    # use verbose = -1 to turn off warnings    \n    # slope_constraint is for DTW. 
\"symmetric\" or \"asymmetric\"\n    x = np.array(x)\n    import utils.dtw as dtw\n    \n    if use_window:\n        window = np.ceil(x.shape[1] / 10.).astype(int)\n    else:\n        window = None\n    orig_steps = np.arange(x.shape[1])\n    l = np.argmax(labels, axis=1) if labels.ndim > 1 else labels\n        \n    ret = np.zeros_like(x)\n    # for i in tqdm(range(ret.shape[0])):\n    for i in range(ret.shape[0]):\n        # get the same class as i\n        choices = np.where(l == l[i])[0]\n        if choices.size > 0:        \n            # pick random intra-class pattern\n            k = min(choices.size, batch_size)\n            random_prototypes = x[np.random.choice(choices, k, replace=False)]\n            \n            # calculate dtw between all\n            dtw_matrix = np.zeros((k, k))\n            for p, prototype in enumerate(random_prototypes):\n                for s, sample in enumerate(random_prototypes):\n                    if p == s:\n                        dtw_matrix[p, s] = 0.\n                    else:\n                        dtw_matrix[p, s] = dtw.dtw(prototype, sample, dtw.RETURN_VALUE, slope_constraint=slope_constraint, window=window)\n                        \n            # get medoid\n            medoid_id = np.argsort(np.sum(dtw_matrix, axis=1))[0]\n            nearest_order = np.argsort(dtw_matrix[medoid_id])\n            medoid_pattern = random_prototypes[medoid_id]\n            \n            # start weighted DBA\n            average_pattern = np.zeros_like(medoid_pattern)\n            weighted_sums = np.zeros((medoid_pattern.shape[0]))\n            for nid in nearest_order:\n                if nid == medoid_id or dtw_matrix[medoid_id, nearest_order[1]] == 0.:\n                    average_pattern += medoid_pattern\n                    weighted_sums += np.ones_like(weighted_sums) \n                else:\n                    path = dtw.dtw(medoid_pattern, random_prototypes[nid], dtw.RETURN_PATH, slope_constraint=slope_constraint, 
window=window)\n                    dtw_value = dtw_matrix[medoid_id, nid]\n                    warped = random_prototypes[nid, path[1]]\n                    weight = np.exp(np.log(0.5)*dtw_value/dtw_matrix[medoid_id, nearest_order[1]])\n                    average_pattern[path[0]] += weight * warped\n                    weighted_sums[path[0]] += weight \n            \n            ret[i,:] = average_pattern / weighted_sums[:,np.newaxis]\n        else:\n            # if verbose > -1:\n            #     print(\"There is only one pattern of class {}, skipping pattern average\".format(l[i]))\n            ret[i,:] = x[i]\n    return ret\n\n# Proposed\n\ndef random_guided_warp(x, labels, slope_constraint=\"symmetric\", use_window=True, dtw_type=\"normal\", verbose=0):\n    # use verbose = -1 to turn off warnings\n    # slope_constraint is for DTW. \"symmetric\" or \"asymmetric\"\n    # dtw_type is for shapeDTW or DTW. \"normal\" or \"shape\"\n    \n    import utils.dtw as dtw\n    \n    if use_window:\n        window = np.ceil(x.shape[1] / 10.).astype(int)\n    else:\n        window = None\n    orig_steps = np.arange(x.shape[1])\n    l = np.argmax(labels, axis=1) if labels.ndim > 1 else labels\n    \n    ret = np.zeros_like(x)\n    # for i, pat in enumerate(tqdm(x)):\n    for i, pat in enumerate(x):\n        # guarentees that same one isnt selected\n        choices = np.delete(np.arange(x.shape[0]), i)\n        # remove ones of different classes\n        choices = np.where(l[choices] == l[i])[0]\n        if choices.size > 0:        \n            # pick random intra-class pattern\n            random_prototype = x[np.random.choice(choices)]\n            \n            if dtw_type == \"shape\":\n                path = dtw.shape_dtw(random_prototype, pat, dtw.RETURN_PATH, slope_constraint=slope_constraint, window=window)\n            else:\n                path = dtw.dtw(random_prototype, pat, dtw.RETURN_PATH, slope_constraint=slope_constraint, window=window)\n                
            \n            # Time warp\n            warped = pat[path[1]]\n            for dim in range(x.shape[2]):\n                ret[i,:,dim] = np.interp(orig_steps, np.linspace(0, x.shape[1]-1., num=warped.shape[0]), warped[:,dim]).T\n        else:\n            # if verbose > -1:\n            #     print(\"There is only one pattern of class {}, skipping timewarping\".format(l[i]))\n            ret[i,:] = pat\n    return ret\n\ndef random_guided_warp_shape(x, labels, slope_constraint=\"symmetric\", use_window=True):\n    return random_guided_warp(x, labels, slope_constraint, use_window, dtw_type=\"shape\")\n\ndef discriminative_guided_warp(x, labels, batch_size=6, slope_constraint=\"symmetric\", use_window=True, dtw_type=\"normal\", use_variable_slice=True, verbose=0):\n    # use verbose = -1 to turn off warnings\n    # slope_constraint is for DTW. \"symmetric\" or \"asymmetric\"\n    # dtw_type is for shapeDTW or DTW. \"normal\" or \"shape\"\n    \n    import utils.dtw as dtw\n    \n    if use_window:\n        window = np.ceil(x.shape[1] / 10.).astype(int)\n    else:\n        window = None\n    orig_steps = np.arange(x.shape[1])\n    l = np.argmax(labels, axis=1) if labels.ndim > 1 else labels\n    \n    positive_batch = np.ceil(batch_size / 2).astype(int)\n    negative_batch = np.floor(batch_size / 2).astype(int)\n        \n    ret = np.zeros_like(x)\n    warp_amount = np.zeros(x.shape[0])\n    # for i, pat in enumerate(tqdm(x)):\n    for i, pat in enumerate(x):\n        # guarentees that same one isnt selected\n        choices = np.delete(np.arange(x.shape[0]), i)\n        \n        # remove ones of different classes\n        positive = np.where(l[choices] == l[i])[0]\n        negative = np.where(l[choices] != l[i])[0]\n        \n        if positive.size > 0 and negative.size > 0:\n            pos_k = min(positive.size, positive_batch)\n            neg_k = min(negative.size, negative_batch)\n            positive_prototypes = x[np.random.choice(positive, 
pos_k, replace=False)]\n            negative_prototypes = x[np.random.choice(negative, neg_k, replace=False)]\n                        \n            # vector embedding and nearest prototype in one\n            pos_aves = np.zeros((pos_k))\n            neg_aves = np.zeros((pos_k))\n            if dtw_type == \"shape\":\n                for p, pos_prot in enumerate(positive_prototypes):\n                    for ps, pos_samp in enumerate(positive_prototypes):\n                        if p != ps:\n                            pos_aves[p] += (1./(pos_k-1.))*dtw.shape_dtw(pos_prot, pos_samp, dtw.RETURN_VALUE, slope_constraint=slope_constraint, window=window)\n                    for ns, neg_samp in enumerate(negative_prototypes):\n                        neg_aves[p] += (1./neg_k)*dtw.shape_dtw(pos_prot, neg_samp, dtw.RETURN_VALUE, slope_constraint=slope_constraint, window=window)\n                selected_id = np.argmax(neg_aves - pos_aves)\n                path = dtw.shape_dtw(positive_prototypes[selected_id], pat, dtw.RETURN_PATH, slope_constraint=slope_constraint, window=window)\n            else:\n                for p, pos_prot in enumerate(positive_prototypes):\n                    for ps, pos_samp in enumerate(positive_prototypes):\n                        if p != ps:\n                            pos_aves[p] += (1./(pos_k-1.))*dtw.dtw(pos_prot, pos_samp, dtw.RETURN_VALUE, slope_constraint=slope_constraint, window=window)\n                    for ns, neg_samp in enumerate(negative_prototypes):\n                        neg_aves[p] += (1./neg_k)*dtw.dtw(pos_prot, neg_samp, dtw.RETURN_VALUE, slope_constraint=slope_constraint, window=window)\n                selected_id = np.argmax(neg_aves - pos_aves)\n                path = dtw.dtw(positive_prototypes[selected_id], pat, dtw.RETURN_PATH, slope_constraint=slope_constraint, window=window)\n                   \n            # Time warp\n            warped = pat[path[1]]\n            warp_path_interp = np.interp(orig_steps, 
np.linspace(0, x.shape[1]-1., num=warped.shape[0]), path[1])\n            warp_amount[i] = np.sum(np.abs(orig_steps-warp_path_interp))\n            for dim in range(x.shape[2]):\n                ret[i,:,dim] = np.interp(orig_steps, np.linspace(0, x.shape[1]-1., num=warped.shape[0]), warped[:,dim]).T\n        else:\n            # if verbose > -1:\n            #     print(\"There is only one pattern of class {}\".format(l[i]))\n            ret[i,:] = pat\n            warp_amount[i] = 0.\n    if use_variable_slice:\n        max_warp = np.max(warp_amount)\n        if max_warp == 0:\n            # unchanged\n            ret = window_slice(ret, reduce_ratio=0.9)\n        else:\n            for i, pat in enumerate(ret):\n                # Variable Sllicing\n                ret[i] = window_slice(pat[np.newaxis,:,:], reduce_ratio=0.9+0.1*warp_amount[i]/max_warp)[0]\n    return ret\n\ndef discriminative_guided_warp_shape(x, labels, batch_size=6, slope_constraint=\"symmetric\", use_window=True):\n    return discriminative_guided_warp(x, labels, batch_size, slope_constraint, use_window, dtw_type=\"shape\")\n\n\ndef run_augmentation(x, y, args):\n    print(\"Augmenting %s\"%args.data)\n    np.random.seed(args.seed)\n    x_aug = x\n    y_aug = y\n    if args.augmentation_ratio > 0:\n        augmentation_tags = \"%d\"%args.augmentation_ratio\n        for n in range(args.augmentation_ratio):\n            x_temp, augmentation_tags = augment(x, y, args)\n            x_aug = np.append(x_aug, x_temp, axis=0)\n            y_aug = np.append(y_aug, y, axis=0)\n            print(\"Round %d: %s done\"%(n, augmentation_tags))\n        if args.extra_tag:\n            augmentation_tags += \"_\"+args.extra_tag\n    else:\n        augmentation_tags = args.extra_tag\n    return x_aug, y_aug, augmentation_tags\n\ndef run_augmentation_single(x, y, args):\n    # print(\"Augmenting %s\"%args.data)\n    np.random.seed(args.seed)\n    x_aug = x\n    y_aug = y\n    if args.augmentation_ratio > 0:\n     
   augmentation_tags = \"%d\"%args.augmentation_ratio\n        for n in range(args.augmentation_ratio):\n            x_temp, augmentation_tags = augment(x, y, args)\n            x_aug =x_temp\n            # print(\"Round %d: %s done\"%(n, augmentation_tags))\n        if args.extra_tag:\n            augmentation_tags += \"_\"+args.extra_tag\n    else:\n        augmentation_tags = args.extra_tag\n    return x_aug, y_aug, augmentation_tags\n\n\ndef augment(x, y, args):\n    import utils.augmentation as aug\n    augmentation_tags = \"\"\n    if args.jitter:\n        x = aug.jitter(x)\n        augmentation_tags += \"_jitter\"\n    if args.scaling:\n        x = aug.scaling(x)\n        augmentation_tags += \"_scaling\"\n    if args.rotation:\n        x = aug.rotation(x)\n        augmentation_tags += \"_rotation\"\n    if args.permutation:\n        x = aug.permutation(x)\n        augmentation_tags += \"_permutation\"\n    if args.randompermutation:\n        x = aug.permutation(x, seg_mode=\"random\")\n        augmentation_tags += \"_randomperm\"\n    if args.magwarp:\n        x = aug.magnitude_warp(x)\n        augmentation_tags += \"_magwarp\"\n    if args.timewarp:\n        x = aug.time_warp(x)\n        augmentation_tags += \"_timewarp\"\n    if args.windowslice:\n        x = aug.window_slice(x)\n        augmentation_tags += \"_windowslice\"\n    if args.windowwarp:\n        x = aug.window_warp(x)\n        augmentation_tags += \"_windowwarp\"\n    if args.spawner:\n        x = aug.spawner(x, y)\n        augmentation_tags += \"_spawner\"\n    if args.dtwwarp:\n        x = aug.random_guided_warp(x, y)\n        augmentation_tags += \"_rgw\"\n    if args.shapedtwwarp:\n        x = aug.random_guided_warp_shape(x, y)\n        augmentation_tags += \"_rgws\"\n    if args.wdba:\n        x = aug.wdba(x, y)\n        augmentation_tags += \"_wdba\"\n    if args.discdtw:\n        x = aug.discriminative_guided_warp(x, y)\n        augmentation_tags += \"_dgw\"\n    if args.discsdtw:\n    
    x = aug.discriminative_guided_warp_shape(x, y)\n        augmentation_tags += \"_dgws\"\n    return x, augmentation_tags\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/utils/dtw.py",
    "content": "__author__ = 'Brian Iwana'\n\nimport numpy as np\nimport math\nimport sys\n\nRETURN_VALUE = 0\nRETURN_PATH = 1\nRETURN_ALL = -1\n\n# Core DTW\ndef _traceback(DTW, slope_constraint):\n    i, j = np.array(DTW.shape) - 1\n    p, q = [i-1], [j-1]\n    \n    if slope_constraint == \"asymmetric\":\n        while (i > 1):\n            tb = np.argmin((DTW[i-1, j], DTW[i-1, j-1], DTW[i-1, j-2]))\n\n            if (tb == 0):\n                i = i - 1\n            elif (tb == 1):\n                i = i - 1\n                j = j - 1\n            elif (tb == 2):\n                i = i - 1\n                j = j - 2\n\n            p.insert(0, i-1)\n            q.insert(0, j-1)\n    elif slope_constraint == \"symmetric\":\n        while (i > 1 or j > 1):\n            tb = np.argmin((DTW[i-1, j-1], DTW[i-1, j], DTW[i, j-1]))\n\n            if (tb == 0):\n                i = i - 1\n                j = j - 1\n            elif (tb == 1):\n                i = i - 1\n            elif (tb == 2):\n                j = j - 1\n\n            p.insert(0, i-1)\n            q.insert(0, j-1)\n    else:\n        sys.exit(\"Unknown slope constraint %s\"%slope_constraint)\n        \n    return (np.array(p), np.array(q))\n\ndef dtw(prototype, sample, return_flag = RETURN_VALUE, slope_constraint=\"asymmetric\", window=None):\n    \"\"\" Computes the DTW of two sequences.\n    :param prototype: np array [0..b]\n    :param sample: np array [0..t]\n    :param extended: bool\n    \"\"\"\n    p = prototype.shape[0]\n    assert p != 0, \"Prototype empty!\"\n    s = sample.shape[0]\n    assert s != 0, \"Sample empty!\"\n    \n    if window is None:\n        window = s\n    \n    cost = np.full((p, s), np.inf)\n    for i in range(p):\n        start = max(0, i-window)\n        end = min(s, i+window)+1\n        cost[i,start:end]=np.linalg.norm(sample[start:end] - prototype[i], axis=1)\n\n    DTW = _cummulative_matrix(cost, slope_constraint, window)\n        \n    if return_flag == 
RETURN_ALL:\n        return DTW[-1,-1], cost, DTW[1:,1:], _traceback(DTW, slope_constraint)\n    elif return_flag == RETURN_PATH:\n        return _traceback(DTW, slope_constraint)\n    else:\n        return DTW[-1,-1]\n\ndef _cummulative_matrix(cost, slope_constraint, window):\n    p = cost.shape[0]\n    s = cost.shape[1]\n    \n    # Note: DTW is one larger than cost and the original patterns\n    DTW = np.full((p+1, s+1), np.inf)\n\n    DTW[0, 0] = 0.0\n\n    if slope_constraint == \"asymmetric\":\n        for i in range(1, p+1):\n            if i <= window+1:\n                DTW[i,1] = cost[i-1,0] + min(DTW[i-1,0], DTW[i-1,1])\n            for j in range(max(2, i-window), min(s, i+window)+1):\n                DTW[i,j] = cost[i-1,j-1] + min(DTW[i-1,j-2], DTW[i-1,j-1], DTW[i-1,j])\n    elif slope_constraint == \"symmetric\":\n        for i in range(1, p+1):\n            for j in range(max(1, i-window), min(s, i+window)+1):\n                DTW[i,j] = cost[i-1,j-1] + min(DTW[i-1,j-1], DTW[i,j-1], DTW[i-1,j])\n    else:\n        sys.exit(\"Unknown slope constraint %s\"%slope_constraint)\n        \n    return DTW\n\ndef shape_dtw(prototype, sample, return_flag = RETURN_VALUE, slope_constraint=\"asymmetric\", window=None, descr_ratio=0.05):\n    \"\"\" Computes the shapeDTW of two sequences.\n    :param prototype: np array [0..b]\n    :param sample: np array [0..t]\n    :param extended: bool\n    \"\"\"\n    # shapeDTW\n    # https://www.sciencedirect.com/science/article/pii/S0031320317303710\n    \n    p = prototype.shape[0]\n    assert p != 0, \"Prototype empty!\"\n    s = sample.shape[0]\n    assert s != 0, \"Sample empty!\"\n    \n    if window is None:\n        window = s\n        \n    p_feature_len = np.clip(np.round(p * descr_ratio), 5, 100).astype(int)\n    s_feature_len = np.clip(np.round(s * descr_ratio), 5, 100).astype(int)\n    \n    # padding\n    p_pad_front = (np.ceil(p_feature_len / 2.)).astype(int)\n    p_pad_back = (np.floor(p_feature_len / 
2.)).astype(int)\n    s_pad_front = (np.ceil(s_feature_len / 2.)).astype(int)\n    s_pad_back = (np.floor(s_feature_len / 2.)).astype(int)\n    \n    prototype_pad = np.pad(prototype, ((p_pad_front, p_pad_back), (0, 0)), mode=\"edge\") \n    sample_pad = np.pad(sample, ((s_pad_front, s_pad_back), (0, 0)), mode=\"edge\") \n    p_p = prototype_pad.shape[0]\n    s_p = sample_pad.shape[0]\n        \n    cost = np.full((p, s), np.inf)\n    for i in range(p):\n        for j in range(max(0, i-window), min(s, i+window)):\n            cost[i, j] = np.linalg.norm(sample_pad[j:j+s_feature_len] - prototype_pad[i:i+p_feature_len])\n            \n    DTW = _cummulative_matrix(cost, slope_constraint=slope_constraint, window=window)\n    \n    if return_flag == RETURN_ALL:\n        return DTW[-1,-1], cost, DTW[1:,1:], _traceback(DTW, slope_constraint)\n    elif return_flag == RETURN_PATH:\n        return _traceback(DTW, slope_constraint)\n    else:\n        return DTW[-1,-1]\n    \n# Draw helpers\ndef draw_graph2d(cost, DTW, path, prototype, sample):\n    import matplotlib.pyplot as plt\n    plt.figure(figsize=(12, 8))\n   # plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05, hspace=.01)\n\n    #cost\n    plt.subplot(2, 3, 1)\n    plt.imshow(cost.T, cmap=plt.cm.gray, interpolation='none', origin='lower')\n    plt.plot(path[0], path[1], 'y')\n    plt.xlim((-0.5, cost.shape[0]-0.5))\n    plt.ylim((-0.5, cost.shape[0]-0.5))\n\n    #dtw\n    plt.subplot(2, 3, 2)\n    plt.imshow(DTW.T, cmap=plt.cm.gray, interpolation='none', origin='lower')\n    plt.plot(path[0]+1, path[1]+1, 'y')\n    plt.xlim((-0.5, DTW.shape[0]-0.5))\n    plt.ylim((-0.5, DTW.shape[0]-0.5))\n\n    #prototype\n    plt.subplot(2, 3, 4)\n    plt.plot(prototype[:,0], prototype[:,1], 'b-o')\n\n    #connection\n    plt.subplot(2, 3, 5)\n    for i in range(0,path[0].shape[0]):\n        plt.plot([prototype[path[0][i],0], sample[path[1][i],0]],[prototype[path[0][i],1], sample[path[1][i],1]], 'y-')\n    
plt.plot(sample[:,0], sample[:,1], 'g-o')\n    plt.plot(prototype[:,0], prototype[:,1], 'b-o')\n\n    #sample\n    plt.subplot(2, 3, 6)\n    plt.plot(sample[:,0], sample[:,1], 'g-o')\n\n    plt.tight_layout()\n    plt.show()\n\ndef draw_graph1d(cost, DTW, path, prototype, sample):\n    import matplotlib.pyplot as plt\n    plt.figure(figsize=(12, 8))\n   # plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05, hspace=.01)\n    p_steps = np.arange(prototype.shape[0])\n    s_steps = np.arange(sample.shape[0])\n\n    #cost\n    plt.subplot(2, 3, 1)\n    plt.imshow(cost.T, cmap=plt.cm.gray, interpolation='none', origin='lower')\n    plt.plot(path[0], path[1], 'y')\n    plt.xlim((-0.5, cost.shape[0]-0.5))\n    plt.ylim((-0.5, cost.shape[0]-0.5))\n\n    #dtw\n    plt.subplot(2, 3, 2)\n    plt.imshow(DTW.T, cmap=plt.cm.gray, interpolation='none', origin='lower')\n    plt.plot(path[0]+1, path[1]+1, 'y')\n    plt.xlim((-0.5, DTW.shape[0]-0.5))\n    plt.ylim((-0.5, DTW.shape[0]-0.5))\n\n    #prototype\n    plt.subplot(2, 3, 4)\n    plt.plot(p_steps, prototype[:,0], 'b-o')\n\n    #connection\n    plt.subplot(2, 3, 5)\n    for i in range(0,path[0].shape[0]):\n        plt.plot([path[0][i], path[1][i]],[prototype[path[0][i],0], sample[path[1][i],0]], 'y-')\n    plt.plot(p_steps, sample[:,0], 'g-o')\n    plt.plot(s_steps, prototype[:,0], 'b-o')\n\n    #sample\n    plt.subplot(2, 3, 6)\n    plt.plot(s_steps, sample[:,0], 'g-o')\n\n    plt.tight_layout()\n    plt.show()"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/utils/dtw_metric.py",
    "content": "from numpy import array, zeros, full, argmin, inf, ndim\nfrom scipy.spatial.distance import cdist\nfrom math import isinf\n\n\ndef dtw(x, y, dist, warp=1, w=inf, s=1.0):\n    \"\"\"\n    Computes Dynamic Time Warping (DTW) of two sequences.\n\n    :param array x: N1*M array\n    :param array y: N2*M array\n    :param func dist: distance used as cost measure\n    :param int warp: how many shifts are computed.\n    :param int w: window size limiting the maximal distance between indices of matched entries |i,j|.\n    :param float s: weight applied on off-diagonal moves of the path. As s gets larger, the warping path is increasingly biased towards the diagonal\n    Returns the minimum distance, the cost matrix, the accumulated cost matrix, and the wrap path.\n    \"\"\"\n    assert len(x)\n    assert len(y)\n    assert isinf(w) or (w >= abs(len(x) - len(y)))\n    assert s > 0\n    r, c = len(x), len(y)\n    if not isinf(w):\n        D0 = full((r + 1, c + 1), inf)\n        for i in range(1, r + 1):\n            D0[i, max(1, i - w):min(c + 1, i + w + 1)] = 0\n        D0[0, 0] = 0\n    else:\n        D0 = zeros((r + 1, c + 1))\n        D0[0, 1:] = inf\n        D0[1:, 0] = inf\n    D1 = D0[1:, 1:]  # view\n    for i in range(r):\n        for j in range(c):\n            if (isinf(w) or (max(0, i - w) <= j <= min(c, i + w))):\n                D1[i, j] = dist(x[i], y[j])\n    C = D1.copy()\n    jrange = range(c)\n    for i in range(r):\n        if not isinf(w):\n            jrange = range(max(0, i - w), min(c, i + w + 1))\n        for j in jrange:\n            min_list = [D0[i, j]]\n            for k in range(1, warp + 1):\n                i_k = min(i + k, r)\n                j_k = min(j + k, c)\n                min_list += [D0[i_k, j] * s, D0[i, j_k] * s]\n            D1[i, j] += min(min_list)\n    if len(x) == 1:\n        path = zeros(len(y)), range(len(y))\n    elif len(y) == 1:\n        path = range(len(x)), zeros(len(x))\n    else:\n        path = 
_traceback(D0)\n    return D1[-1, -1], C, D1, path\n\n\ndef accelerated_dtw(x, y, dist, warp=1):\n    \"\"\"\n    Computes Dynamic Time Warping (DTW) of two sequences in a faster way.\n    Instead of iterating through each element and calculating each distance,\n    this uses the cdist function from scipy (https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.cdist.html)\n\n    :param array x: N1*M array\n    :param array y: N2*M array\n    :param string or func dist: distance parameter for cdist. When string is given, cdist uses optimized functions for the distance metrics.\n    If a string is passed, the distance function can be 'braycurtis', 'canberra', 'chebyshev', 'cityblock', 'correlation', 'cosine', 'dice', 'euclidean', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'wminkowski', 'yule'.\n    :param int warp: how many shifts are computed.\n    Returns the minimum distance, the cost matrix, the accumulated cost matrix, and the wrap path.\n    \"\"\"\n    assert len(x)\n    assert len(y)\n    if ndim(x) == 1:\n        x = x.reshape(-1, 1)\n    if ndim(y) == 1:\n        y = y.reshape(-1, 1)\n    r, c = len(x), len(y)\n    D0 = zeros((r + 1, c + 1))\n    D0[0, 1:] = inf\n    D0[1:, 0] = inf\n    D1 = D0[1:, 1:]\n    D0[1:, 1:] = cdist(x, y, dist)\n    C = D1.copy()\n    for i in range(r):\n        for j in range(c):\n            min_list = [D0[i, j]]\n            for k in range(1, warp + 1):\n                min_list += [D0[min(i + k, r), j],\n                             D0[i, min(j + k, c)]]\n            D1[i, j] += min(min_list)\n    if len(x) == 1:\n        path = zeros(len(y)), range(len(y))\n    elif len(y) == 1:\n        path = range(len(x)), zeros(len(x))\n    else:\n        path = _traceback(D0)\n    return D1[-1, -1], C, D1, path\n\n\ndef _traceback(D):\n    i, j = array(D.shape) - 2\n    p, q = [i], [j]\n    
while (i > 0) or (j > 0):\n        tb = argmin((D[i, j], D[i, j + 1], D[i + 1, j]))\n        if tb == 0:\n            i -= 1\n            j -= 1\n        elif tb == 1:\n            i -= 1\n        else:  # (tb == 2):\n            j -= 1\n        p.insert(0, i)\n        q.insert(0, j)\n    return array(p), array(q)\n\n\nif __name__ == '__main__':\n    w = inf\n    s = 1.0\n    if 1:  # 1-D numeric\n        from sklearn.metrics.pairwise import manhattan_distances\n        x = [0, 0, 1, 1, 2, 4, 2, 1, 2, 0]\n        y = [1, 1, 1, 2, 2, 2, 2, 3, 2, 0]\n        dist_fun = manhattan_distances\n        w = 1\n        # s = 1.2\n    elif 0:  # 2-D numeric\n        from sklearn.metrics.pairwise import euclidean_distances\n        x = [[0, 0], [0, 1], [1, 1], [1, 2], [2, 2], [4, 3], [2, 3], [1, 1], [2, 2], [0, 1]]\n        y = [[1, 0], [1, 1], [1, 1], [2, 1], [4, 3], [4, 3], [2, 3], [3, 1], [1, 2], [1, 0]]\n        dist_fun = euclidean_distances\n    else:  # 1-D list of strings\n        from nltk.metrics.distance import edit_distance\n        # x = ['we', 'shelled', 'clams', 'for', 'the', 'chowder']\n        # y = ['class', 'too']\n        x = ['i', 'soon', 'found', 'myself', 'muttering', 'to', 'the', 'walls']\n        y = ['see', 'drown', 'himself']\n        # x = 'we talked about the situation'.split()\n        # y = 'we talked about the situation'.split()\n        dist_fun = edit_distance\n    dist, cost, acc, path = dtw(x, y, dist_fun, w=w, s=s)\n\n    # Vizualize\n    from matplotlib import pyplot as plt\n    plt.imshow(cost.T, origin='lower', cmap=plt.cm.Reds, interpolation='nearest')\n    plt.plot(path[0], path[1], '-o')  # relation\n    plt.xticks(range(len(x)), x)\n    plt.yticks(range(len(y)), y)\n    plt.xlabel('x')\n    plt.ylabel('y')\n    plt.axis('tight')\n    if isinf(w):\n        plt.title('Minimum distance: {}, slope weight: {}'.format(dist, s))\n    else:\n        plt.title('Minimum distance: {}, window widht: {}, slope weight: {}'.format(dist, w, s))\n   
 plt.show()"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/utils/losses.py",
    "content": "# This source code is provided for the purposes of scientific reproducibility\n# under the following limited license from Element AI Inc. The code is an\n# implementation of the N-BEATS model (Oreshkin et al., N-BEATS: Neural basis\n# expansion analysis for interpretable time series forecasting,\n# https://arxiv.org/abs/1905.10437). The copyright to the source code is\n# licensed under the Creative Commons - Attribution-NonCommercial 4.0\n# International license (CC BY-NC 4.0):\n# https://creativecommons.org/licenses/by-nc/4.0/.  Any commercial use (whether\n# for the benefit of third parties or internally in production) requires an\n# explicit license. The subject-matter of the N-BEATS model and associated\n# materials are the property of Element AI Inc. and may be subject to patent\n# protection. No license to patents is granted hereunder (whether express or\n# implied). Copyright © 2020 Element AI Inc. All rights reserved.\n\n\"\"\"\nLoss functions for PyTorch.\n\"\"\"\n\nimport torch as t\nimport torch.nn as nn\nimport numpy as np\nimport pdb\n\n\ndef divide_no_nan(a, b):\n    \"\"\"\n    a/b where the resulted NaN or Inf are replaced by 0.\n    \"\"\"\n    result = a / b\n    result[result != result] = .0\n    result[result == np.inf] = .0\n    return result\n\n\nclass mape_loss(nn.Module):\n    def __init__(self):\n        super(mape_loss, self).__init__()\n\n    def forward(self, insample: t.Tensor, freq: int,\n                forecast: t.Tensor, target: t.Tensor, mask: t.Tensor) -> t.float:\n        \"\"\"\n        MAPE loss as defined in: https://en.wikipedia.org/wiki/Mean_absolute_percentage_error\n\n        :param forecast: Forecast values. Shape: batch, time\n        :param target: Target values. Shape: batch, time\n        :param mask: 0/1 mask. 
Shape: batch, time\n        :return: Loss value\n        \"\"\"\n        weights = divide_no_nan(mask, target)\n        return t.mean(t.abs((forecast - target) * weights))\n\n\nclass smape_loss(nn.Module):\n    def __init__(self):\n        super(smape_loss, self).__init__()\n\n    def forward(self, insample: t.Tensor, freq: int,\n                forecast: t.Tensor, target: t.Tensor, mask: t.Tensor) -> t.float:\n        \"\"\"\n        sMAPE loss as defined in https://robjhyndman.com/hyndsight/smape/ (Makridakis 1993)\n\n        :param forecast: Forecast values. Shape: batch, time\n        :param target: Target values. Shape: batch, time\n        :param mask: 0/1 mask. Shape: batch, time\n        :return: Loss value\n        \"\"\"\n        return 200 * t.mean(divide_no_nan(t.abs(forecast - target),\n                                          t.abs(forecast.data) + t.abs(target.data)) * mask)\n\n\nclass mase_loss(nn.Module):\n    def __init__(self):\n        super(mase_loss, self).__init__()\n\n    def forward(self, insample: t.Tensor, freq: int,\n                forecast: t.Tensor, target: t.Tensor, mask: t.Tensor) -> t.float:\n        \"\"\"\n        MASE loss as defined in \"Scaled Errors\" https://robjhyndman.com/papers/mase.pdf\n\n        :param insample: Insample values. Shape: batch, time_i\n        :param freq: Frequency value\n        :param forecast: Forecast values. Shape: batch, time_o\n        :param target: Target values. Shape: batch, time_o\n        :param mask: 0/1 mask. Shape: batch, time_o\n        :return: Loss value\n        \"\"\"\n        masep = t.mean(t.abs(insample[:, freq:] - insample[:, :-freq]), dim=1)\n        masked_masep_inv = divide_no_nan(mask, masep[:, None])\n        return t.mean(t.abs(target - forecast) * masked_masep_inv)\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/utils/m4_summary.py",
    "content": "# This source code is provided for the purposes of scientific reproducibility\n# under the following limited license from Element AI Inc. The code is an\n# implementation of the N-BEATS model (Oreshkin et al., N-BEATS: Neural basis\n# expansion analysis for interpretable time series forecasting,\n# https://arxiv.org/abs/1905.10437). The copyright to the source code is\n# licensed under the Creative Commons - Attribution-NonCommercial 4.0\n# International license (CC BY-NC 4.0):\n# https://creativecommons.org/licenses/by-nc/4.0/.  Any commercial use (whether\n# for the benefit of third parties or internally in production) requires an\n# explicit license. The subject-matter of the N-BEATS model and associated\n# materials are the property of Element AI Inc. and may be subject to patent\n# protection. No license to patents is granted hereunder (whether express or\n# implied). Copyright 2020 Element AI Inc. All rights reserved.\n\n\"\"\"\nM4 Summary\n\"\"\"\nfrom collections import OrderedDict\n\nimport numpy as np\nimport pandas as pd\n\nfrom Other_baselines.data_provider.m4 import M4Dataset\nfrom Other_baselines.data_provider.m4 import M4Meta\nimport os\n\n\ndef group_values(values, groups, group_name):\n    return np.array([v[~np.isnan(v)] for v in values[groups == group_name]])\n\n\ndef mase(forecast, insample, outsample, frequency):\n    return np.mean(np.abs(forecast - outsample)) / np.mean(np.abs(insample[:-frequency] - insample[frequency:]))\n\n\ndef smape_2(forecast, target):\n    denom = np.abs(target) + np.abs(forecast)\n    # divide by 1.0 instead of 0.0, in case when denom is zero the enumerator will be 0.0 anyway.\n    denom[denom == 0.0] = 1.0\n    return 200 * np.abs(forecast - target) / denom\n\n\ndef mape(forecast, target):\n    denom = np.abs(target)\n    # divide by 1.0 instead of 0.0, in case when denom is zero the enumerator will be 0.0 anyway.\n    denom[denom == 0.0] = 1.0\n    return 100 * np.abs(forecast - target) / 
denom\n\n\nclass M4Summary:\n    def __init__(self, file_path, root_path):\n        self.file_path = file_path\n        self.training_set = M4Dataset.load(training=True, dataset_file=root_path)\n        self.test_set = M4Dataset.load(training=False, dataset_file=root_path)\n        self.naive_path = os.path.join(root_path, 'submission-Naive2.csv')\n\n    def evaluate(self):\n        \"\"\"\n        Evaluate forecasts using M4 test dataset.\n\n        :param forecast: Forecasts. Shape: timeseries, time.\n        :return: sMAPE and OWA grouped by seasonal patterns.\n        \"\"\"\n        grouped_owa = OrderedDict()\n\n        naive2_forecasts = pd.read_csv(self.naive_path).values[:, 1:].astype(np.float32)\n        naive2_forecasts = np.array([v[~np.isnan(v)] for v in naive2_forecasts])\n\n        model_mases = {}\n        naive2_smapes = {}\n        naive2_mases = {}\n        grouped_smapes = {}\n        grouped_mapes = {}\n        for group_name in M4Meta.seasonal_patterns:\n            file_name = self.file_path + group_name + \"_forecast.csv\"\n            if os.path.exists(file_name):\n                model_forecast = pd.read_csv(file_name).values\n\n            naive2_forecast = group_values(naive2_forecasts, self.test_set.groups, group_name)\n            target = group_values(self.test_set.values, self.test_set.groups, group_name)\n            # all timeseries within group have same frequency\n            frequency = self.training_set.frequencies[self.test_set.groups == group_name][0]\n            insample = group_values(self.training_set.values, self.test_set.groups, group_name)\n\n            model_mases[group_name] = np.mean([mase(forecast=model_forecast[i],\n                                                    insample=insample[i],\n                                                    outsample=target[i],\n                                                    frequency=frequency) for i in range(len(model_forecast))])\n            naive2_mases[group_name] = 
np.mean([mase(forecast=naive2_forecast[i],\n                                                     insample=insample[i],\n                                                     outsample=target[i],\n                                                     frequency=frequency) for i in range(len(model_forecast))])\n\n            naive2_smapes[group_name] = np.mean(smape_2(naive2_forecast, target))\n            grouped_smapes[group_name] = np.mean(smape_2(forecast=model_forecast, target=target))\n            grouped_mapes[group_name] = np.mean(mape(forecast=model_forecast, target=target))\n\n        grouped_smapes = self.summarize_groups(grouped_smapes)\n        grouped_mapes = self.summarize_groups(grouped_mapes)\n        grouped_model_mases = self.summarize_groups(model_mases)\n        grouped_naive2_smapes = self.summarize_groups(naive2_smapes)\n        grouped_naive2_mases = self.summarize_groups(naive2_mases)\n        for k in grouped_model_mases.keys():\n            grouped_owa[k] = (grouped_model_mases[k] / grouped_naive2_mases[k] +\n                              grouped_smapes[k] / grouped_naive2_smapes[k]) / 2\n\n        def round_all(d):\n            return dict(map(lambda kv: (kv[0], np.round(kv[1], 3)), d.items()))\n\n        return round_all(grouped_smapes), round_all(grouped_owa), round_all(grouped_mapes), round_all(\n            grouped_model_mases)\n\n    def summarize_groups(self, scores):\n        \"\"\"\n        Re-group scores respecting M4 rules.\n        :param scores: Scores per group.\n        :return: Grouped scores.\n        \"\"\"\n        scores_summary = OrderedDict()\n\n        def group_count(group_name):\n            return len(np.where(self.test_set.groups == group_name)[0])\n\n        weighted_score = {}\n        for g in ['Yearly', 'Quarterly', 'Monthly']:\n            weighted_score[g] = scores[g] * group_count(g)\n            scores_summary[g] = scores[g]\n\n        others_score = 0\n        others_count = 0\n        for g in ['Weekly', 
'Daily', 'Hourly']:\n            others_score += scores[g] * group_count(g)\n            others_count += group_count(g)\n        weighted_score['Others'] = others_score\n        scores_summary['Others'] = others_score / others_count\n\n        average = np.sum(list(weighted_score.values())) / len(self.test_set.groups)\n        scores_summary['Average'] = average\n\n        return scores_summary\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/utils/masking.py",
    "content": "import torch\n\n\nclass TriangularCausalMask():\n    def __init__(self, B, L, device=\"cpu\"):\n        mask_shape = [B, 1, L, L]\n        with torch.no_grad():\n            self._mask = torch.triu(torch.ones(mask_shape, dtype=torch.bool), diagonal=1).to(device)\n\n    @property\n    def mask(self):\n        return self._mask\n\n\nclass ProbMask():\n    def __init__(self, B, H, L, index, scores, device=\"cpu\"):\n        _mask = torch.ones(L, scores.shape[-1], dtype=torch.bool).to(device).triu(1)\n        _mask_ex = _mask[None, None, :].expand(B, H, L, scores.shape[-1])\n        indicator = _mask_ex[torch.arange(B)[:, None, None],\n                    torch.arange(H)[None, :, None],\n                    index, :].to(device)\n        self._mask = indicator.view(scores.shape).to(device)\n\n    @property\n    def mask(self):\n        return self._mask\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/utils/metrics.py",
    "content": "import numpy as np\n\n\ndef RSE(pred, true):\n    return np.sqrt(np.sum((true - pred) ** 2)) / np.sqrt(np.sum((true - true.mean()) ** 2))\n\n\ndef CORR(pred, true):\n    u = ((true - true.mean(0)) * (pred - pred.mean(0))).sum(0)\n    d = np.sqrt(((true - true.mean(0)) ** 2 * (pred - pred.mean(0)) ** 2).sum(0))\n    return (u / d).mean(-1)\n\n\ndef MAE(pred, true):\n    return np.mean(np.abs(pred - true))\n\n\ndef MSE(pred, true):\n    return np.mean((pred - true) ** 2)\n\n\ndef RMSE(pred, true):\n    return np.sqrt(MSE(pred, true))\n\n\ndef MAPE(pred, true):\n    return np.mean(np.abs((pred - true) / true))\n\n\ndef MSPE(pred, true):\n    return np.mean(np.square((pred - true) / true))\n\n\ndef metric(pred, true):\n    mae = MAE(pred, true)\n    mse = MSE(pred, true)\n    rmse = RMSE(pred, true)\n    mape = MAPE(pred, true)\n    mspe = MSPE(pred, true)\n\n    return mae, mse, rmse, mape, mspe\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/utils/print_args.py",
    "content": "def print_args(args):\n    print(\"\\033[1m\" + \"Basic Config\" + \"\\033[0m\")\n    print(f'  {\"Task Name:\":<20}{args.task_name:<20}{\"Is Training:\":<20}{args.is_training:<20}')\n    print(f'  {\"Model ID:\":<20}{args.model_id:<20}{\"Model:\":<20}{args.model:<20}')\n    print()\n\n    print(\"\\033[1m\" + \"Data Loader\" + \"\\033[0m\")\n    print(f'  {\"Data:\":<20}{args.data:<20}{\"Root Path:\":<20}{args.root_path:<20}')\n    print(f'  {\"Data Path:\":<20}{args.data_path:<20}{\"Features:\":<20}{args.features:<20}')\n    print(f'  {\"Target:\":<20}{args.target:<20}{\"Freq:\":<20}{args.freq:<20}')\n    print(f'  {\"Checkpoints:\":<20}{args.checkpoints:<20}')\n    print()\n\n    if args.task_name in ['long_term_forecast', 'short_term_forecast']:\n        print(\"\\033[1m\" + \"Forecasting Task\" + \"\\033[0m\")\n        print(f'  {\"Seq Len:\":<20}{args.seq_len:<20}{\"Label Len:\":<20}{args.label_len:<20}')\n        print(f'  {\"Pred Len:\":<20}{args.pred_len:<20}{\"Seasonal Patterns:\":<20}{args.seasonal_patterns:<20}')\n        print(f'  {\"Inverse:\":<20}{args.inverse:<20}')\n        print()\n\n    if args.task_name == 'imputation':\n        print(\"\\033[1m\" + \"Imputation Task\" + \"\\033[0m\")\n        print(f'  {\"Mask Rate:\":<20}{args.mask_rate:<20}')\n        print()\n\n    if args.task_name == 'anomaly_detection':\n        print(\"\\033[1m\" + \"Anomaly Detection Task\" + \"\\033[0m\")\n        print(f'  {\"Anomaly Ratio:\":<20}{args.anomaly_ratio:<20}')\n        print()\n\n    print(\"\\033[1m\" + \"Model Parameters\" + \"\\033[0m\")\n    print(f'  {\"Top k:\":<20}{args.top_k:<20}{\"Num Kernels:\":<20}{args.num_kernels:<20}')\n    print(f'  {\"Enc In:\":<20}{args.enc_in:<20}{\"Dec In:\":<20}{args.dec_in:<20}')\n    print(f'  {\"C Out:\":<20}{args.c_out:<20}{\"d model:\":<20}{args.d_model:<20}')\n    print(f'  {\"n heads:\":<20}{args.n_heads:<20}{\"e layers:\":<20}{args.e_layers:<20}')\n    print(f'  {\"d 
layers:\":<20}{args.d_layers:<20}{\"d FF:\":<20}{args.d_ff:<20}')\n    print(f'  {\"Moving Avg:\":<20}{args.moving_avg:<20}{\"Factor:\":<20}{args.factor:<20}')\n    print(f'  {\"Distil:\":<20}{args.distil:<20}{\"Dropout:\":<20}{args.dropout:<20}')\n    print(f'  {\"Embed:\":<20}{args.embed:<20}{\"Activation:\":<20}{args.activation:<20}')\n    print(f'  {\"Output Attention:\":<20}{args.output_attention:<20}')\n    print()\n\n    print(\"\\033[1m\" + \"Run Parameters\" + \"\\033[0m\")\n    print(f'  {\"Num Workers:\":<20}{args.num_workers:<20}{\"Itr:\":<20}{args.itr:<20}')\n    print(f'  {\"Train Epochs:\":<20}{args.train_epochs:<20}{\"Batch Size:\":<20}{args.batch_size:<20}')\n    print(f'  {\"Patience:\":<20}{args.patience:<20}{\"Learning Rate:\":<20}{args.learning_rate:<20}')\n    print(f'  {\"Des:\":<20}{args.des:<20}{\"Loss:\":<20}{args.loss:<20}')\n    print(f'  {\"Lradj:\":<20}{args.lradj:<20}{\"Use Amp:\":<20}{args.use_amp:<20}')\n    print()\n\n    print(\"\\033[1m\" + \"GPU\" + \"\\033[0m\")\n    print(f'  {\"Use GPU:\":<20}{args.use_gpu:<20}{\"GPU:\":<20}{args.gpu:<20}')\n    print(f'  {\"Use Multi GPU:\":<20}{args.use_multi_gpu:<20}{\"Devices:\":<20}{args.devices:<20}')\n    print()\n\n    print(\"\\033[1m\" + \"De-stationary Projector Params\" + \"\\033[0m\")\n    p_hidden_dims_str = ', '.join(map(str, args.p_hidden_dims))\n    print(f'  {\"P Hidden Dims:\":<20}{p_hidden_dims_str:<20}{\"P Hidden Layers:\":<20}{args.p_hidden_layers:<20}') \n    print()\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/utils/rev_in.py",
    "content": "# -*- coding = utf-8 -*-\n\"\"\"\n文 件 名:  rev_in.py\n功能描述:  RevIn模型\n编 写 人:  \n更新时间:  2023-06-25 08:18\n\"\"\"\nimport torch\nimport torch.nn as nn\n\n\nclass RevIn(nn.Module):\n    def __init__(self, num_features: int, eps=1e-5, affine=True, subtract_last=False):\n        \"\"\"\n        :param num_features: 输入特征数\n        :param eps: 稳定性附加值\n        :param affine: 如果为True，则RevIN具有可学习的仿射参数\n        \"\"\"\n        super(RevIn, self).__init__()\n\n        self.num_features = num_features\n        self.eps = eps\n        self.affine = affine\n        self.subtract_last = subtract_last\n\n        if self.affine:\n            self._init_params()\n\n    def forward(self, x, mode: str):\n        if mode == 'norm':\n            self._get_statistics(x)\n            x = self._normalize(x)\n\n        elif mode == 'denorm':\n            x = self._denormalize(x)\n\n        else:\n            raise NotImplementedError\n\n        return x\n\n    def _init_params(self):\n        # initialize RevIN params: (C,)\n        self.affine_weight = nn.Parameter(torch.ones(self.num_features))\n        self.affine_bias = nn.Parameter(torch.zeros(self.num_features))\n\n    def _get_statistics(self, x):\n        dim2reduce = tuple(range(1, x.ndim - 1))\n        if self.subtract_last:\n            self.last = x[:, -1, :].unsqueeze(1)\n        else:\n            self.mean = torch.mean(x, dim=dim2reduce, keepdim=True).detach()\n        self.stdev = torch.sqrt(torch.var(x, dim=dim2reduce, keepdim=True, unbiased=False) + self.eps).detach()\n\n    def _normalize(self, x):\n        if self.subtract_last:\n            x = x - self.last\n        else:\n            x = x - self.mean\n        x = x / self.stdev\n        if self.affine:\n            x = x * self.affine_weight\n            x = x + self.affine_bias\n        return x\n\n    def _denormalize(self, x):\n        if self.affine:\n            x = x - self.affine_bias\n            x = x / (self.affine_weight + self.eps * 
self.eps)\n        x = x * self.stdev\n        if self.subtract_last:\n            x = x + self.last\n        else:\n            x = x + self.mean\n        return x\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/utils/timefeatures.py",
    "content": "# From: gluonts/src/gluonts/time_feature/_base.py\n# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\nfrom typing import List\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.tseries import offsets\nfrom pandas.tseries.frequencies import to_offset\n\n\nclass TimeFeature:\n    def __init__(self):\n        pass\n\n    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n        pass\n\n    def __repr__(self):\n        return self.__class__.__name__ + \"()\"\n\n\nclass SecondOfMinute(TimeFeature):\n    \"\"\"Minute of hour encoded as value between [-0.5, 0.5]\"\"\"\n\n    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n        return index.second / 59.0 - 0.5\n\n\nclass MinuteOfHour(TimeFeature):\n    \"\"\"Minute of hour encoded as value between [-0.5, 0.5]\"\"\"\n\n    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n        return index.minute / 59.0 - 0.5\n\n\nclass HourOfDay(TimeFeature):\n    \"\"\"Hour of day encoded as value between [-0.5, 0.5]\"\"\"\n\n    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n        return index.hour / 23.0 - 0.5\n\n\nclass DayOfWeek(TimeFeature):\n    \"\"\"Hour of day encoded as value between [-0.5, 0.5]\"\"\"\n\n    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n        return index.dayofweek / 6.0 - 0.5\n\n\nclass DayOfMonth(TimeFeature):\n    \"\"\"Day of month encoded as value between [-0.5, 0.5]\"\"\"\n\n    def 
__call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n        return (index.day - 1) / 30.0 - 0.5\n\n\nclass DayOfYear(TimeFeature):\n    \"\"\"Day of year encoded as value between [-0.5, 0.5]\"\"\"\n\n    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n        return (index.dayofyear - 1) / 365.0 - 0.5\n\n\nclass MonthOfYear(TimeFeature):\n    \"\"\"Month of year encoded as value between [-0.5, 0.5]\"\"\"\n\n    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n        return (index.month - 1) / 11.0 - 0.5\n\n\nclass WeekOfYear(TimeFeature):\n    \"\"\"Week of year encoded as value between [-0.5, 0.5]\"\"\"\n\n    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n        return (index.isocalendar().week - 1) / 52.0 - 0.5\n\n\ndef time_features_from_frequency_str(freq_str: str) -> List[TimeFeature]:\n    \"\"\"\n    Returns a list of time features that will be appropriate for the given frequency string.\n    Parameters\n    ----------\n    freq_str\n        Frequency string of the form [multiple][granularity] such as \"12H\", \"5min\", \"1D\" etc.\n    \"\"\"\n\n    features_by_offsets = {\n        offsets.YearEnd: [],\n        offsets.QuarterEnd: [MonthOfYear],\n        offsets.MonthEnd: [MonthOfYear],\n        offsets.Week: [DayOfMonth, WeekOfYear],\n        offsets.Day: [DayOfWeek, DayOfMonth, DayOfYear],\n        offsets.BusinessDay: [DayOfWeek, DayOfMonth, DayOfYear],\n        offsets.Hour: [HourOfDay, DayOfWeek, DayOfMonth, DayOfYear],\n        offsets.Minute: [\n            MinuteOfHour,\n            HourOfDay,\n            DayOfWeek,\n            DayOfMonth,\n            DayOfYear,\n        ],\n        offsets.Second: [\n            SecondOfMinute,\n            MinuteOfHour,\n            HourOfDay,\n            DayOfWeek,\n            DayOfMonth,\n            DayOfYear,\n        ],\n    }\n\n    offset = to_offset(freq_str)\n\n    for offset_type, feature_classes in features_by_offsets.items():\n        if 
isinstance(offset, offset_type):\n            return [cls() for cls in feature_classes]\n\n    supported_freq_msg = f\"\"\"\n    Unsupported frequency {freq_str}\n    The following frequencies are supported:\n        Y   - yearly\n            alias: A\n        M   - monthly\n        W   - weekly\n        D   - daily\n        B   - business days\n        H   - hourly\n        T   - minutely\n            alias: min\n        S   - secondly\n    \"\"\"\n    raise RuntimeError(supported_freq_msg)\n\n\ndef time_features(dates, freq='h'):\n    return np.vstack([feat(dates) for feat in time_features_from_frequency_str(freq)])\n"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/utils/tools.py",
    "content": "import numpy as np\nimport torch\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport math\nfrom tqdm import tqdm\nfrom Other_baselines.utils.metrics import metric\nfrom distutils.util import strtobool\nfrom datetime import datetime\n\n\nplt.switch_backend('agg')\n\n\ndef adjust_learning_rate(optimizer, epoch, args):\n    # lr = args.learning_rate * (0.2 ** (epoch // 2))\n    if args.lradj == 'type1':\n        lr_adjust = {epoch: args.learning_rate * (0.5 ** ((epoch - 1) // 1))}\n    elif args.lradj == 'type2':\n        lr_adjust = {\n            2: 5e-5, 4: 1e-5, 6: 5e-6, 8: 1e-6,\n            10: 5e-7, 15: 1e-7, 20: 5e-8\n        }\n    elif args.lradj == \"cosine\":\n        lr_adjust = {epoch: args.learning_rate /2 * (1 + math.cos(epoch / args.train_epochs * math.pi))}\n    if epoch in lr_adjust.keys():\n        lr = lr_adjust[epoch]\n        for param_group in optimizer.param_groups:\n            param_group['lr'] = lr\n        print('Updating learning rate to {}'.format(lr))\n\n\nclass EarlyStopping:\n    def __init__(self, patience=7, verbose=False, delta=0):\n        self.patience = patience\n        self.verbose = verbose\n        self.counter = 0\n        self.best_score = None\n        self.early_stop = False\n        self.val_loss_min = np.Inf\n        self.delta = delta\n\n    def __call__(self, val_loss, model, path):\n        score = -val_loss\n        if self.best_score is None:\n            self.best_score = score\n            self.save_checkpoint(val_loss, model, path)\n        elif score < self.best_score + self.delta:\n            self.counter += 1\n            print(f'EarlyStopping counter: {self.counter} out of {self.patience}')\n            if self.counter >= self.patience:\n                self.early_stop = True\n        else:\n            self.best_score = score\n            self.save_checkpoint(val_loss, model, path)\n            self.counter = 0\n\n    def save_checkpoint(self, val_loss, model, path):\n        if 
self.verbose:\n            print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}).  Saving model ...')\n        torch.save(model.state_dict(), path + '/' + 'checkpoint.pth')\n        self.val_loss_min = val_loss\n\n\nclass dotdict(dict):\n    \"\"\"dot.notation access to dictionary attributes\"\"\"\n    __getattr__ = dict.get\n    __setattr__ = dict.__setitem__\n    __delattr__ = dict.__delitem__\n\n\nclass StandardScaler():\n    def __init__(self, mean, std):\n        self.mean = mean\n        self.std = std\n\n    def transform(self, data):\n        return (data - self.mean) / self.std\n\n    def inverse_transform(self, data):\n        return (data * self.std) + self.mean\n\n\ndef visual(true, preds=None, name='./pic/test.pdf'):\n    \"\"\"\n    Results visualization\n    \"\"\"\n    plt.figure()\n    plt.plot(true, label='GroundTruth', linewidth=2)\n    if preds is not None:\n        plt.plot(preds, label='Prediction', linewidth=2)\n    plt.legend()\n    plt.savefig(name, bbox_inches='tight')\n\n\ndef adjustment(gt, pred):\n    anomaly_state = False\n    for i in range(len(gt)):\n        if gt[i] == 1 and pred[i] == 1 and not anomaly_state:\n            anomaly_state = True\n            for j in range(i, 0, -1):\n                if gt[j] == 0:\n                    break\n                else:\n                    if pred[j] == 0:\n                        pred[j] = 1\n            for j in range(i, len(gt)):\n                if gt[j] == 0:\n                    break\n                else:\n                    if pred[j] == 0:\n                        pred[j] = 1\n        elif gt[i] == 0:\n            anomaly_state = False\n        if anomaly_state:\n            pred[i] = 1\n    return gt, pred\n\n\ndef cal_accuracy(y_pred, y_true):\n    return np.mean(y_pred == y_true)\n\n\ndef vali(model, vali_data, vali_loader, criterion, args, device, itr):\n    total_loss = []\n    if args.model == 'PatchTST' or args.model == 'DLinear' or args.model == 
'TCN':\n        model.eval()\n    else:\n        model.in_layer.eval()\n        model.out_layer.eval()\n    with torch.no_grad():\n        for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in tqdm(enumerate(vali_loader)):\n            batch_x = batch_x.float().to(device)\n            batch_y = batch_y.float()\n\n            batch_x_mark = batch_x_mark.float().to(device)\n            batch_y_mark = batch_y_mark.float().to(device)\n\n            outputs = model(batch_x, itr)\n\n            # encoder - decoder\n            outputs = outputs[:, -args.pred_len:, :]\n            batch_y = batch_y[:, -args.pred_len:, :].to(device)\n\n            pred = outputs.detach().cpu()\n            true = batch_y.detach().cpu()\n\n            loss = criterion(pred, true)\n\n            total_loss.append(loss)\n    total_loss = np.average(total_loss)\n    if args.model == 'PatchTST' or args.model == 'DLinear' or args.model == 'TCN':\n        model.train()\n    else:\n        model.in_layer.train()\n        model.out_layer.train()\n    return total_loss\n\n\ndef MASE(x, freq, pred, true):\n    masep = np.mean(np.abs(x[:, freq:] - x[:, :-freq]))\n    return np.mean(np.abs(pred - true) / (masep + 1e-8))\n\n\ndef test(model, test_data, test_loader, args, device, itr):\n    preds = []\n    trues = []\n    # mases = []\n\n    model.eval()\n    with torch.no_grad():\n        for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in tqdm(enumerate(test_loader)):\n            # outputs_np = batch_x.cpu().numpy()\n            # np.save(\"emb_test/ETTh2_192_test_input_itr{}_{}.npy\".format(itr, i), outputs_np)\n            # outputs_np = batch_y.cpu().numpy()\n            # np.save(\"emb_test/ETTh2_192_test_true_itr{}_{}.npy\".format(itr, i), outputs_np)\n\n            batch_x = batch_x.float().to(device)\n            batch_y = batch_y.float()\n\n            outputs = model(batch_x[:, -args.seq_len:, :], itr)\n\n            # encoder - decoder\n            outputs = outputs[:, 
-args.pred_len:, :]\n            batch_y = batch_y[:, -args.pred_len:, :].to(device)\n\n            pred = outputs.detach().cpu().numpy()\n            true = batch_y.detach().cpu().numpy()\n\n            preds.append(pred)\n            trues.append(true)\n\n    # preds = np.array(preds)\n    # trues = np.array(trues)\n    preds = np.concatenate(preds, axis=0)\n    trues = np.concatenate(trues, axis=0)\n    # mases = np.mean(np.array(mases))\n    print('test shape:', preds.shape, trues.shape)\n    preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1])\n    trues = trues.reshape(-1, trues.shape[-2], trues.shape[-1])\n    print('test shape:', preds.shape, trues.shape)\n\n    mae, mse, rmse, mape, mspe = metric(preds, trues)\n    # print('mae:{:.4f}, mse:{:.4f}, rmse:{:.4f}, smape:{:.4f}, mases:{:.4f}'.format(mae, mse, rmse, smape, mases))\n    print('mae:{:.4f}, mse:{:.4f}, rmse:{:.4f}, mspe:{:.4f}'.format(mae, mse, rmse, mspe))\n\n    return mse, mae\n\n\ndef convert_tsf_to_dataframe(\n    full_file_path_and_name,\n    replace_missing_vals_with=\"NaN\",\n    value_column_name=\"series_value\",\n):\n    col_names = []\n    col_types = []\n    all_data = {}\n    line_count = 0\n    frequency = None\n    forecast_horizon = None\n    contain_missing_values = None\n    contain_equal_length = None\n    found_data_tag = False\n    found_data_section = False\n    started_reading_data_section = False\n\n    with open(full_file_path_and_name, \"r\", encoding=\"cp1252\") as file:\n        for line in file:\n            # Strip white space from start/end of line\n            line = line.strip()\n\n            if line:\n                if line.startswith(\"@\"):  # Read meta-data\n                    if not line.startswith(\"@data\"):\n                        line_content = line.split(\" \")\n                        if line.startswith(\"@attribute\"):\n                            if (\n                                len(line_content) != 3\n                            ):  # 
Attributes have both name and type\n                                raise Exception(\"Invalid meta-data specification.\")\n\n                            col_names.append(line_content[1])\n                            col_types.append(line_content[2])\n                        else:\n                            if (\n                                len(line_content) != 2\n                            ):  # Other meta-data have only values\n                                raise Exception(\"Invalid meta-data specification.\")\n\n                            if line.startswith(\"@frequency\"):\n                                frequency = line_content[1]\n                            elif line.startswith(\"@horizon\"):\n                                forecast_horizon = int(line_content[1])\n                            elif line.startswith(\"@missing\"):\n                                contain_missing_values = bool(\n                                    strtobool(line_content[1])\n                                )\n                            elif line.startswith(\"@equallength\"):\n                                contain_equal_length = bool(strtobool(line_content[1]))\n\n                    else:\n                        if len(col_names) == 0:\n                            raise Exception(\n                                \"Missing attribute section. Attribute section must come before data.\"\n                            )\n\n                        found_data_tag = True\n                elif not line.startswith(\"#\"):\n                    if len(col_names) == 0:\n                        raise Exception(\n                            \"Missing attribute section. 
Attribute section must come before data.\"\n                        )\n                    elif not found_data_tag:\n                        raise Exception(\"Missing @data tag.\")\n                    else:\n                        if not started_reading_data_section:\n                            started_reading_data_section = True\n                            found_data_section = True\n                            all_series = []\n\n                            for col in col_names:\n                                all_data[col] = []\n\n                        full_info = line.split(\":\")\n\n                        if len(full_info) != (len(col_names) + 1):\n                            raise Exception(\"Missing attributes/values in series.\")\n\n                        series = full_info[len(full_info) - 1]\n                        series = series.split(\",\")\n\n                        if len(series) == 0:\n                            raise Exception(\n                                \"A given series should contains a set of comma separated numeric values. At least one numeric value should be there in a series. Missing values should be indicated with ? symbol\"\n                            )\n\n                        numeric_series = []\n\n                        for val in series:\n                            if val == \"?\":\n                                numeric_series.append(replace_missing_vals_with)\n                            else:\n                                numeric_series.append(float(val))\n\n                        if numeric_series.count(replace_missing_vals_with) == len(\n                            numeric_series\n                        ):\n                            raise Exception(\n                                \"All series values are missing. A given series should contains a set of comma separated numeric values. 
At least one numeric value should be there in a series.\"\n                            )\n\n                        all_series.append(pd.Series(numeric_series).array)\n\n                        for i in range(len(col_names)):\n                            att_val = None\n                            if col_types[i] == \"numeric\":\n                                att_val = int(full_info[i])\n                            elif col_types[i] == \"string\":\n                                att_val = str(full_info[i])\n                            elif col_types[i] == \"date\":\n                                att_val = datetime.strptime(\n                                    full_info[i], \"%Y-%m-%d %H-%M-%S\"\n                                )\n                            else:\n                                raise Exception(\n                                    \"Invalid attribute type.\"\n                                )  # Currently, the code supports only numeric, string and date types. Extend this as required.\n\n                            if att_val is None:\n                                raise Exception(\"Invalid attribute value.\")\n                            else:\n                                all_data[col_names[i]].append(att_val)\n\n                line_count = line_count + 1\n\n        if line_count == 0:\n            raise Exception(\"Empty file.\")\n        if len(col_names) == 0:\n            raise Exception(\"Missing attribute section.\")\n        if not found_data_section:\n            raise Exception(\"Missing series information under data section.\")\n\n        all_data[value_column_name] = all_series\n        loaded_data = pd.DataFrame(all_data)\n\n        return (\n            loaded_data,\n            frequency,\n            forecast_horizon,\n            contain_missing_values,\n            contain_equal_length,\n        )\n\n\ndef test_params_flop(model,x_shape):\n    \"\"\"\n    If you want to thest former's flop, you need to give default 
value to inputs in model.forward(), the following code can only pass one argument to forward()\n    \"\"\"\n    model_params = 0\n    for parameter in model.parameters():\n        model_params += parameter.numel()\n        print('INFO: Trainable parameter count: {:.2f}M'.format(model_params / 1000000.0))\n    from ptflops import get_model_complexity_info\n    with torch.cuda.device(0):\n        macs, params = get_model_complexity_info(model.cuda(), x_shape, as_strings=True, print_per_layer_stat=True)\n        # print('Flops:' + flops)\n        # print('Params:' + params)\n        print('{:<30}  {:<8}'.format('Computational complexity: ', macs))\n        print('{:<30}  {:<8}'.format('Number of parameters: ', params))"
  },
  {
    "path": "ts_forecasting_methods/Other_baselines/utils/tools_tempo.py",
    "content": "import numpy as np\nimport torch\nimport torch.nn as nn\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\n\nfrom datetime import datetime\nfrom distutils.util import strtobool\nimport pandas as pd\n\nfrom Other_baselines.utils.metrics import metric\n\nplt.switch_backend('agg')\n\n\ndef adjust_learning_rate(optimizer, epoch, args):\n    # lr = args.learning_rate * (0.2 ** (epoch // 2))\n    # if args.decay_fac is None:\n    #     args.decay_fac = 0.5\n    # if args.lradj == 'type1':\n    #     lr_adjust = {epoch: args.learning_rate * (args.decay_fac ** ((epoch - 1) // 1))}\n    # elif args.lradj == 'type2':\n    #     lr_adjust = {\n    #         2: 5e-5, 4: 1e-5, 6: 5e-6, 8: 1e-6,\n    #         10: 5e-7, 15: 1e-7, 20: 5e-8\n    #     }\n    if args.lradj == 'type1':\n        lr_adjust = {epoch: args.learning_rate if epoch < 3 else args.learning_rate * (0.9 ** ((epoch - 3) // 1))}\n    elif args.lradj == 'type2':\n        lr_adjust = {epoch: args.learning_rate * (args.decay_fac ** ((epoch - 1) // 1))}\n    elif args.lradj == 'type4':\n        lr_adjust = {epoch: args.learning_rate * (args.decay_fac ** ((epoch) // 1))}\n    else:\n        args.learning_rate = 1e-4\n        lr_adjust = {epoch: args.learning_rate if epoch < 3 else args.learning_rate * (0.9 ** ((epoch - 3) // 1))}\n    print(\"lr_adjust = {}\".format(lr_adjust))\n    if epoch in lr_adjust.keys():\n        lr = lr_adjust[epoch]\n        for param_group in optimizer.param_groups:\n            param_group['lr'] = lr\n        print('Updating learning rate to {}'.format(lr))\n\n\nclass EarlyStopping:\n    def __init__(self, patience=7, verbose=False, delta=0):\n        self.patience = patience\n        self.verbose = verbose\n        self.counter = 0\n        self.best_score = None\n        self.early_stop = False\n        self.val_loss_min = np.Inf\n        self.delta = delta\n\n    def __call__(self, val_loss, model, path):\n        score = -val_loss\n        if self.best_score is 
None:\n            self.best_score = score\n            self.save_checkpoint(val_loss, model, path)\n        elif score < self.best_score + self.delta:\n            self.counter += 1\n            print(f'EarlyStopping counter: {self.counter} out of {self.patience}')\n            if self.counter >= self.patience:\n                self.early_stop = True\n        else:\n            self.best_score = score\n            self.save_checkpoint(val_loss, model, path)\n            self.counter = 0\n\n    def save_checkpoint(self, val_loss, model, path):\n        if self.verbose:\n            print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}).  Saving model ...')\n        torch.save(model.state_dict(), path + '/' + 'checkpoint.pth')\n        self.val_loss_min = val_loss\n\n\nclass dotdict(dict):\n    \"\"\"dot.notation access to dictionary attributes\"\"\"\n    __getattr__ = dict.get\n    __setattr__ = dict.__setitem__\n    __delattr__ = dict.__delitem__\n\n\nclass StandardScaler():\n    def __init__(self, mean, std):\n        self.mean = mean\n        self.std = std\n\n    def transform(self, data):\n        return (data - self.mean) / self.std\n\n    def inverse_transform(self, data):\n        return (data * self.std) + self.mean\n\n\ndef visual(true, preds=None, name='./pic/test.pdf'):\n    \"\"\"\n    Results visualization\n    \"\"\"\n    plt.figure()\n    plt.plot(true, label='GroundTruth', linewidth=2)\n    if preds is not None:\n        plt.plot(preds, label='Prediction', linewidth=2)\n    plt.legend()\n    plt.savefig(name, bbox_inches='tight')\n\n\ndef convert_tsf_to_dataframe(\n        full_file_path_and_name,\n        replace_missing_vals_with=\"NaN\",\n        value_column_name=\"series_value\",\n):\n    col_names = []\n    col_types = []\n    all_data = {}\n    line_count = 0\n    frequency = None\n    forecast_horizon = None\n    contain_missing_values = None\n    contain_equal_length = None\n    found_data_tag = False\n    
found_data_section = False\n    started_reading_data_section = False\n\n    with open(full_file_path_and_name, \"r\", encoding=\"cp1252\") as file:\n        for line in file:\n            # Strip white space from start/end of line\n            line = line.strip()\n\n            if line:\n                if line.startswith(\"@\"):  # Read meta-data\n                    if not line.startswith(\"@data\"):\n                        line_content = line.split(\" \")\n                        if line.startswith(\"@attribute\"):\n                            if (\n                                    len(line_content) != 3\n                            ):  # Attributes have both name and type\n                                raise Exception(\"Invalid meta-data specification.\")\n\n                            col_names.append(line_content[1])\n                            col_types.append(line_content[2])\n                        else:\n                            if (\n                                    len(line_content) != 2\n                            ):  # Other meta-data have only values\n                                raise Exception(\"Invalid meta-data specification.\")\n\n                            if line.startswith(\"@frequency\"):\n                                frequency = line_content[1]\n                            elif line.startswith(\"@horizon\"):\n                                forecast_horizon = int(line_content[1])\n                            elif line.startswith(\"@missing\"):\n                                contain_missing_values = bool(\n                                    strtobool(line_content[1])\n                                )\n                            elif line.startswith(\"@equallength\"):\n                                contain_equal_length = bool(strtobool(line_content[1]))\n\n                    else:\n                        if len(col_names) == 0:\n                            raise Exception(\n                                
\"Missing attribute section. Attribute section must come before data.\"\n                            )\n\n                        found_data_tag = True\n                elif not line.startswith(\"#\"):\n                    if len(col_names) == 0:\n                        raise Exception(\n                            \"Missing attribute section. Attribute section must come before data.\"\n                        )\n                    elif not found_data_tag:\n                        raise Exception(\"Missing @data tag.\")\n                    else:\n                        if not started_reading_data_section:\n                            started_reading_data_section = True\n                            found_data_section = True\n                            all_series = []\n\n                            for col in col_names:\n                                all_data[col] = []\n\n                        full_info = line.split(\":\")\n\n                        if len(full_info) != (len(col_names) + 1):\n                            raise Exception(\"Missing attributes/values in series.\")\n\n                        series = full_info[len(full_info) - 1]\n                        series = series.split(\",\")\n\n                        if len(series) == 0:\n                            raise Exception(\n                                \"A given series should contains a set of comma separated numeric values. At least one numeric value should be there in a series. Missing values should be indicated with ? 
symbol\"\n                            )\n\n                        numeric_series = []\n\n                        for val in series:\n                            if val == \"?\":\n                                numeric_series.append(replace_missing_vals_with)\n                            else:\n                                numeric_series.append(float(val))\n\n                        if numeric_series.count(replace_missing_vals_with) == len(\n                                numeric_series\n                        ):\n                            raise Exception(\n                                \"All series values are missing. A given series should contains a set of comma separated numeric values. At least one numeric value should be there in a series.\"\n                            )\n\n                        all_series.append(pd.Series(numeric_series).array)\n\n                        for i in range(len(col_names)):\n                            att_val = None\n                            if col_types[i] == \"numeric\":\n                                att_val = int(full_info[i])\n                            elif col_types[i] == \"string\":\n                                att_val = str(full_info[i])\n                            elif col_types[i] == \"date\":\n                                att_val = datetime.strptime(\n                                    full_info[i], \"%Y-%m-%d %H-%M-%S\"\n                                )\n                            else:\n                                raise Exception(\n                                    \"Invalid attribute type.\"\n                                )  # Currently, the code supports only numeric, string and date types. 
Extend this as required.\n\n                            if att_val is None:\n                                raise Exception(\"Invalid attribute value.\")\n                            else:\n                                all_data[col_names[i]].append(att_val)\n\n                line_count = line_count + 1\n\n        if line_count == 0:\n            raise Exception(\"Empty file.\")\n        if len(col_names) == 0:\n            raise Exception(\"Missing attribute section.\")\n        if not found_data_section:\n            raise Exception(\"Missing series information under data section.\")\n\n        all_data[value_column_name] = all_series\n        loaded_data = pd.DataFrame(all_data)\n\n        return (\n            loaded_data,\n            frequency,\n            forecast_horizon,\n            contain_missing_values,\n            contain_equal_length,\n        )\n\n\ndef vali(model, vali_data, vali_loader, criterion, args, device, itr):\n    total_loss = []\n    if args.model == 'PatchTST' or args.model == 'DLinear' or args.model == 'TCN' or args.model == 'NLinear' or args.model == 'NLinear_multi':\n        model.eval()\n    elif args.model == 'TEMPO' or args.model == 'TEMPO_t5' or 'multi' in args.model:\n        model.in_layer_trend.eval()\n        model.in_layer_season.eval()\n        model.in_layer_noise.eval()\n        model.out_layer_trend.eval()\n        model.out_layer_season.eval()\n        model.out_layer_noise.eval()\n    elif args.model == 'GPT4TS' or args.model == 'GPT4TS_prompt':\n        model.in_layer.eval()\n        model.out_layer.eval()\n    else:\n        model.eval()\n\n    with torch.no_grad():\n        for i, data in tqdm(enumerate(vali_loader)):\n            batch_x, batch_y, batch_x_mark, batch_y_mark, seq_trend, seq_seasonal, seq_resid = data[0], data[1], data[\n                2], data[3], data[4], data[5], data[6]\n            batch_x = batch_x.float().to(device)\n            batch_y = batch_y.float()\n\n            batch_x_mark = 
batch_x_mark.float().to(device)\n            batch_y_mark = batch_y_mark.float().to(device)\n\n            seq_trend = seq_trend.float().to(device)\n            seq_seasonal = seq_seasonal.float().to(device)\n            seq_resid = seq_resid.float().to(device)\n\n            if args.model == 'GPT4TS_multi' or args.model == 'NLinear_multi' or 'TEMPO' in args.model:\n                outputs, _ = model(batch_x, itr, seq_trend, seq_seasonal, seq_resid)\n            elif 'former' in args.model or args.model == 'FEDformer' or args.model == 'TimesNet' or args.model == 'LightTS':\n                dec_inp = torch.zeros_like(batch_y[:, -args.pred_len:, :]).float()\n                dec_inp = torch.cat([batch_y[:, :args.label_len, :], dec_inp], dim=1).float().to(device)\n                outputs = model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n            else:\n                outputs = model(batch_x, itr)\n\n            # encoder - decoder\n            outputs = outputs[:, -args.pred_len:, :]\n            batch_y = batch_y[:, -args.pred_len:, :].to(device)\n\n            pred = outputs.detach().cpu()\n            true = batch_y.detach().cpu()\n\n            loss = criterion(pred, true)\n\n            total_loss.append(loss)\n    total_loss = np.average(total_loss)\n    if args.model == 'PatchTST' or args.model == 'DLinear' or args.model == 'TCN' or args.model == 'NLinear' or args.model == 'NLinear_multi':\n        model.train()\n    elif args.model == 'TEMPO' or args.model == 'TEMPO_t5' or 'multi' in args.model:\n        model.in_layer_trend.train()\n        model.in_layer_season.train()\n        model.in_layer_noise.train()\n        model.out_layer_trend.train()\n        model.out_layer_season.train()\n        model.out_layer_noise.train()\n    elif args.model == 'GPT4TS' or args.model == 'GPT4TS_prompt':\n        model.in_layer.train()\n        model.out_layer.train()\n    else:\n        model.train()\n    return total_loss\n\n\ndef MASE(x, freq, pred, true):\n    
masep = np.mean(np.abs(x[:, freq:] - x[:, :-freq]))\n    return np.mean(np.abs(pred - true) / (masep + 1e-8))\n\n\ndef metric_mae_mse(preds, trues):\n    mse = ((preds - trues) ** 2).mean()\n    mae = np.abs(preds - trues).mean()\n    return mae, mse\n\n\ndef test(model, test_data, test_loader, args, device, itr):\n    preds = []\n    trues = []\n    # mases = []\n\n    # Initialize accumulators for errors\n    total_mae = 0\n    total_mse = 0\n    n_samples = 0\n\n    model.eval()\n    with torch.no_grad():\n        for i, data in tqdm(enumerate(test_loader), total=len(test_loader)):\n\n            batch_x, batch_y, batch_x_mark, batch_y_mark, seq_trend, seq_seasonal, seq_resid = data[0], data[1], data[\n                2], data[3], data[4], data[5], data[6]\n\n            # outputs_np = batch_x.cpu().numpy()\n            # np.save(\"emb_test/ETTh2_192_test_input_itr{}_{}.npy\".format(itr, i), outputs_np)\n            # outputs_np = batch_y.cpu().numpy()\n            # np.save(\"emb_test/ETTh2_192_test_true_itr{}_{}.npy\".format(itr, i), outputs_np)\n\n            batch_x = batch_x.float().to(device)\n            seq_trend = seq_trend.float().to(device)\n            seq_seasonal = seq_seasonal.float().to(device)\n            seq_resid = seq_resid.float().to(device)\n            batch_x_mark = batch_x_mark.float().to(device)\n            batch_y_mark = batch_y_mark.float().to(device)\n\n            batch_y = batch_y.float()\n            if args.model == 'TEMPO' or args.model == 'TEMPO_t5' or 'multi' in args.model:\n                outputs, _ = model(batch_x[:, -args.seq_len:, :], itr, seq_trend[:, -args.seq_len:, :],\n                                   seq_seasonal[:, -args.seq_len:, :], seq_resid[:, -args.seq_len:, :])\n            elif 'former' in args.model or args.model == 'FEDformer' or args.model == 'TimesNet' or args.model == 'LightTS':\n                dec_inp = torch.zeros_like(batch_y[:, -args.pred_len:, :]).float()\n                dec_inp = 
torch.cat([batch_y[:, :args.label_len, :], dec_inp], dim=1).float().to(device)\n                outputs = model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n            else:\n                outputs = model(batch_x[:, -args.seq_len:, :], itr)\n\n            # outputs = model(batch_x[:, -args.seq_len:, :], itr)\n\n            # encoder - decoder\n            outputs = outputs[:, -args.pred_len:, :]\n            batch_y = batch_y[:, -args.pred_len:, :].to(device)\n\n            pred = outputs.detach().cpu().numpy().astype(np.float16)\n            true = batch_y.detach().cpu().numpy().astype(np.float16)\n            torch.cuda.empty_cache()\n\n            # Calculate the batch errors\n            batch_mae, batch_mse = metric_mae_mse(pred, true)\n\n            # Update the total errors\n            total_mae += batch_mae * batch_x.size(0)  # Assuming batch_x.size(0) is the batch size\n            total_mse += batch_mse * batch_x.size(0)\n            n_samples += batch_x.size(0)\n\n            torch.cuda.empty_cache()\n\n            # preds.append(pred)\n            # trues.append(true)\n\n    # Calculate the average errors\n    mae = total_mae / n_samples\n    mse = total_mse / n_samples\n\n    print(f'Average MAE: {mae}')\n    print(f'Average MSE: {mse}')\n    # preds = np.array(preds)\n    # trues = np.array(trues)\n    # # mases = np.mean(np.array(mases))\n    # print('test shape:', preds.shape, trues.shape)\n    # preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1])\n    # trues = trues.reshape(-1, trues.shape[-2], trues.shape[-1])\n    # print('test shape:', preds.shape, trues.shape)\n\n    # mae, mse, rmse, mape, mspe, smape, nd = metric(preds, trues)\n    # # print('mae:{:.4f}, mse:{:.4f}, rmse:{:.4f}, smape:{:.4f}, mases:{:.4f}'.format(mae, mse, rmse, smape, mases))\n    # print('mae:{:.4f}, mse:{:.4f}, rmse:{:.4f}, smape:{:.4f}'.format(mae, mse, rmse, smape))\n\n    return mse, mae\n"
  },
  {
    "path": "ts_forecasting_methods/README.md",
    "content": "This is the time-series forecasting training code for our paper *\"A Survey on Time-Series Pre-Trained Models\"*\n\n## Baselines\n\n|  ID  |                            Method                            | Year |   Press   |                         Source Code                          |\n| :--: | :----------------------------------------------------------: | :--: | :-------: | :----------------------------------------------------------: |\n|  1   |  [LogTrans](https://proceedings.neurips.cc/paper/2019/file/6775a0635c302542da2c32aa19d86be0-Paper.pdf)  | 2019 |    NIPS    |     [github_link](https://github.com/mlpotter/Transformer_Time_Series)      |\n|  2   | [TCN](https://arxiv.org/abs/1803.01271)  | 2018 |    arXiv    |     [github_link](https://github.com/locuslab/TCN)      |\n|  3   | [Informer](https://ojs.aaai.org/index.php/AAAI/article/view/17325/17132) | 2020 | AAAI | [github_link](https://github.com/zhouhaoyi/Informer2020) |\n|  4   | [Autoformer](https://proceedings.neurips.cc/paper/2021/hash/bcc0d400288793e8bdcd7c19a8ac0c2b-Abstract.html)  | 2021 |    NIPS    |     [github_link](https://github.com/thuml/autoformer)      |\n|  5   | [TS2Vec](https://www.aaai.org/AAAI22Papers/AAAI-8809.YueZ.pdf) | 2022 |   AAAI    |      [github_link](https://github.com/yuezhihan/ts2vec)      |\n|  6   |            [CoST](https://openreview.net/forum?id=PilZY3omXV2)            | 2022 |   ICLR    | [github_link](https://github.com/salesforce/CoST) |\n|  7   |            [TimesNet](https://arxiv.org/abs/2210.02186)            | 2023 |   ICLR    | [github_link](https://github.com/thuml/TimesNet) |\n|  8   |            [PatchTST](https://arxiv.org/abs/2211.14730)            | 2023 |   ICLR    | [github_link](https://github.com/yuqinie98/PatchTST) |\n|  9   |            [DLinear](https://arxiv.org/pdf/2205.13504)            | 2023 |   ICLR    | [github_link](https://github.com/vivva/DLinear) |\n|  10   |            [GPT4TS](https://arxiv.org/abs/2302.11939)         
   | 2023 |   NeurIPS    | [github_link](https://github.com/DAMO-DI-ML/NeurIPS2023-One-Fits-All) |\n|  11   |            [TEMPO](https://openreview.net/forum?id=YH5w12OUuU)            | 2024 |   ICLR    | [github_link](https://github.com/DC-research/TEMPO) |\n|  12   |            [iTransformer](https://openreview.net/forum?id=JePfAI8fah)            | 2024 |   ICLR    | [github_link](https://github.com/thuml/iTransformer) |\n\nFor details, please refer to [ts_forecasting_methods/Other_baselines/README](https://github.com/qianlima-lab/time-series-ptms/blob/master/ts_forecasting_methods/Other_baselines/README.md)\n"
  },
  {
    "path": "ts_forecasting_methods/SupervisedBaselines/Dockerfile",
    "content": "FROM nvidia/cuda:11.2.0-devel-ubuntu18.04\n\nENV DEBIAN_FRONTEND=noninteractive\n\nRUN apt-get update  -y --fix-missing && \\\n    apt-get install -y --no-install-recommends \\\n    software-properties-common \\\n    wget \\\n    curl \\\n    unrar \\\n    unzip \\\n    git && \\\n    apt-get upgrade -y libstdc++6 && \\\n    apt-get clean -y\n\nRUN add-apt-repository ppa:ubuntu-toolchain-r/test && \\\n    apt-get update && \\\n    apt-get install -y gcc-9 && \\\n    apt-get upgrade -y libstdc++6\n\nRUN wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh && \\\n    bash Miniconda3-latest-Linux-x86_64.sh -p /miniconda -b  && \\\n    rm -rf Miniconda3-latest-Linux-x86_64.sh\n\nENV PATH=/miniconda/bin:${PATH}\nRUN conda update -y conda\n\nRUN conda install -n base -c conda-forge mamba\n\nADD ./environment.yml ./environment.yml\nRUN mamba env update -n base -f ./environment.yml && \\\n    conda clean -afy\n\n"
  },
  {
    "path": "ts_forecasting_methods/SupervisedBaselines/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2021 THUML @ Tsinghua University\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "ts_forecasting_methods/SupervisedBaselines/Makefile",
    "content": "IMAGE := autoformer\nROOT := $(shell dirname $(realpath $(firstword ${MAKEFILE_LIST})))\n\nDOCKER_PARAMETERS := \\\n\t--user $(shell id -u) \\\n\t--gpus all \\\n\t-v ${ROOT}:/app \\\n\t-w /app \\\n\t-e HOME=/tmp\n\ninit:\n\tdocker build -t ${IMAGE} .\n\nget_dataset:\n\tmkdir -p dataset/ && \\\n\t\tmake run_module module=\"python -m utils.download_data\" && \\\n\t\tunzip dataset/datasets.zip -d dataset/ && \\\n\t\tmv dataset/all_six_datasets/* dataset && \\\n\t\trm -r dataset/all_six_datasets dataset/__MACOSX \n\nrun_module: .require-module\n\tdocker run -i --rm ${DOCKER_PARAMETERS} \\\n\t\t${IMAGE} ${module}\n\nbash_docker:\n\tdocker run -it --rm ${DOCKER_PARAMETERS} ${IMAGE}\n\n.require-module:\nifndef module\n\t$(error module is required)\nendif\n"
  },
  {
    "path": "ts_forecasting_methods/SupervisedBaselines/README.md",
    "content": "## Baselines\n\n- [x] Autoformer\n- [x] Informer\n- [x] LogTrans\n- [x] TCN\n- [x] Transformer\n- [x] Reformer\n\n## Get Started\n\nTrain the model. We provide the experiment scripts of all benchmarks under the folder `./scripts`. You can reproduce the experiment results by:\n\n```bash\nbash ./scripts/ETT_script/Autoformer_ETTm1.sh\nbash ./scripts/ECL_script/Autoformer.sh\nbash ./scripts/Exchange_script/Autoformer.sh\nbash ./scripts/Traffic_script/Autoformer.sh\nbash ./scripts/Weather_script/Autoformer.sh\nbash ./scripts/ILI_script/Autoformer.sh\n```\n"
  },
  {
    "path": "ts_forecasting_methods/SupervisedBaselines/data_provider/__init__.py",
    "content": "\n"
  },
  {
    "path": "ts_forecasting_methods/SupervisedBaselines/data_provider/data_factory.py",
    "content": "from data_provider.data_loader import Dataset_ETT_hour, Dataset_ETT_minute, Dataset_Custom, Dataset_Pred\nfrom torch.utils.data import DataLoader\n\ndata_dict = {\n    'ETTh1': Dataset_ETT_hour,\n    'ETTh2': Dataset_ETT_hour,\n    'ETTm1': Dataset_ETT_minute,\n    'ETTm2': Dataset_ETT_minute,\n    'custom': Dataset_Custom,\n}\n\n\ndef data_provider(args, flag):\n    Data = data_dict[args.data]\n    timeenc = 0 if args.embed != 'timeF' else 1\n\n    if flag == 'test':\n        shuffle_flag = False\n        drop_last = True\n        batch_size = args.batch_size\n        freq = args.freq\n    elif flag == 'pred':\n        shuffle_flag = False\n        drop_last = False\n        batch_size = 1\n        freq = args.freq\n        Data = Dataset_Pred\n    else:\n        shuffle_flag = True\n        drop_last = True\n        batch_size = args.batch_size\n        freq = args.freq\n\n    data_set = Data(\n        root_path=args.root_path,\n        data_path=args.data_path,\n        flag=flag,\n        size=[args.seq_len, args.label_len, args.pred_len],\n        features=args.features,\n        target=args.target,\n        timeenc=timeenc,\n        freq=freq\n    )\n    print(flag, len(data_set))\n    data_loader = DataLoader(\n        data_set,\n        batch_size=batch_size,\n        shuffle=shuffle_flag,\n        num_workers=args.num_workers,\n        drop_last=drop_last)\n    return data_set, data_loader\n"
  },
  {
    "path": "ts_forecasting_methods/SupervisedBaselines/data_provider/data_loader.py",
    "content": "import os\nimport numpy as np\nimport pandas as pd\nimport os\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nfrom sklearn.preprocessing import StandardScaler\nfrom utils.timefeatures import time_features\nimport warnings\n\nwarnings.filterwarnings('ignore')\n\n\nclass Dataset_ETT_hour(Dataset):\n    def __init__(self, root_path, flag='train', size=None,\n                 features='S', data_path='ETTh1.csv',\n                 target='OT', scale=True, timeenc=0, freq='h'):\n        # size [seq_len, label_len, pred_len]\n        # info\n        if size == None:\n            self.seq_len = 24 * 4 * 4\n            self.label_len = 24 * 4\n            self.pred_len = 24 * 4\n        else:\n            self.seq_len = size[0]\n            self.label_len = size[1]\n            self.pred_len = size[2]\n        # init\n        assert flag in ['train', 'test', 'val']\n        type_map = {'train': 0, 'val': 1, 'test': 2}\n        self.set_type = type_map[flag]\n\n        self.features = features\n        self.target = target\n        self.scale = scale\n        self.timeenc = timeenc\n        self.freq = freq\n\n        self.root_path = root_path\n        self.data_path = data_path\n        self.__read_data__()\n\n    def __read_data__(self):\n        self.scaler = StandardScaler()\n        df_raw = pd.read_csv(os.path.join(self.root_path,\n                                          self.data_path))\n\n        border1s = [0, 12 * 30 * 24 - self.seq_len, 12 * 30 * 24 + 4 * 30 * 24 - self.seq_len]\n        border2s = [12 * 30 * 24, 12 * 30 * 24 + 4 * 30 * 24, 12 * 30 * 24 + 8 * 30 * 24]\n        border1 = border1s[self.set_type]\n        border2 = border2s[self.set_type]\n\n        if self.features == 'M' or self.features == 'MS':\n            cols_data = df_raw.columns[1:]\n            df_data = df_raw[cols_data]\n        elif self.features == 'S':\n            df_data = df_raw[[self.target]]\n\n        if self.scale:\n            train_data = 
df_data[border1s[0]:border2s[0]]\n            self.scaler.fit(train_data.values)\n            data = self.scaler.transform(df_data.values)\n        else:\n            data = df_data.values\n\n        df_stamp = df_raw[['date']][border1:border2]\n        df_stamp['date'] = pd.to_datetime(df_stamp.date)\n        if self.timeenc == 0:\n            df_stamp['month'] = df_stamp.date.apply(lambda row: row.month, 1)\n            df_stamp['day'] = df_stamp.date.apply(lambda row: row.day, 1)\n            df_stamp['weekday'] = df_stamp.date.apply(lambda row: row.weekday(), 1)\n            df_stamp['hour'] = df_stamp.date.apply(lambda row: row.hour, 1)\n            data_stamp = df_stamp.drop(['date'], 1).values\n        elif self.timeenc == 1:\n            data_stamp = time_features(pd.to_datetime(df_stamp['date'].values), freq=self.freq)\n            data_stamp = data_stamp.transpose(1, 0)\n\n        self.data_x = data[border1:border2]\n        self.data_y = data[border1:border2]\n        self.data_stamp = data_stamp\n\n    def __getitem__(self, index):\n        s_begin = index\n        s_end = s_begin + self.seq_len\n        r_begin = s_end - self.label_len\n        r_end = r_begin + self.label_len + self.pred_len\n\n        seq_x = self.data_x[s_begin:s_end]\n        seq_y = self.data_y[r_begin:r_end]\n        seq_x_mark = self.data_stamp[s_begin:s_end]\n        seq_y_mark = self.data_stamp[r_begin:r_end]\n\n        return seq_x, seq_y, seq_x_mark, seq_y_mark\n\n    def __len__(self):\n        return len(self.data_x) - self.seq_len - self.pred_len + 1\n\n    def inverse_transform(self, data):\n        return self.scaler.inverse_transform(data)\n\n\nclass Dataset_ETT_minute(Dataset):\n    def __init__(self, root_path, flag='train', size=None,\n                 features='S', data_path='ETTm1.csv',\n                 target='OT', scale=True, timeenc=0, freq='t'):\n        # size [seq_len, label_len, pred_len]\n        # info\n        if size == None:\n            self.seq_len 
= 24 * 4 * 4\n            self.label_len = 24 * 4\n            self.pred_len = 24 * 4\n        else:\n            self.seq_len = size[0]\n            self.label_len = size[1]\n            self.pred_len = size[2]\n        # init\n        assert flag in ['train', 'test', 'val']\n        type_map = {'train': 0, 'val': 1, 'test': 2}\n        self.set_type = type_map[flag]\n\n        self.features = features\n        self.target = target\n        self.scale = scale\n        self.timeenc = timeenc\n        self.freq = freq\n\n        self.root_path = root_path\n        self.data_path = data_path\n        self.__read_data__()\n\n    def __read_data__(self):\n        self.scaler = StandardScaler()\n        df_raw = pd.read_csv(os.path.join(self.root_path,\n                                          self.data_path))\n\n        border1s = [0, 12 * 30 * 24 * 4 - self.seq_len, 12 * 30 * 24 * 4 + 4 * 30 * 24 * 4 - self.seq_len]\n        border2s = [12 * 30 * 24 * 4, 12 * 30 * 24 * 4 + 4 * 30 * 24 * 4, 12 * 30 * 24 * 4 + 8 * 30 * 24 * 4]\n        border1 = border1s[self.set_type]\n        border2 = border2s[self.set_type]\n\n        if self.features == 'M' or self.features == 'MS':\n            cols_data = df_raw.columns[1:]\n            df_data = df_raw[cols_data]\n        elif self.features == 'S':\n            df_data = df_raw[[self.target]]\n\n        if self.scale:\n            train_data = df_data[border1s[0]:border2s[0]]\n            self.scaler.fit(train_data.values)\n            data = self.scaler.transform(df_data.values)\n        else:\n            data = df_data.values\n\n        df_stamp = df_raw[['date']][border1:border2]\n        df_stamp['date'] = pd.to_datetime(df_stamp.date)\n        if self.timeenc == 0:\n            df_stamp['month'] = df_stamp.date.apply(lambda row: row.month, 1)\n            df_stamp['day'] = df_stamp.date.apply(lambda row: row.day, 1)\n            df_stamp['weekday'] = df_stamp.date.apply(lambda row: row.weekday(), 1)\n            
df_stamp['hour'] = df_stamp.date.apply(lambda row: row.hour, 1)\n            df_stamp['minute'] = df_stamp.date.apply(lambda row: row.minute, 1)\n            df_stamp['minute'] = df_stamp.minute.map(lambda x: x // 15)\n            data_stamp = df_stamp.drop(['date'], 1).values\n        elif self.timeenc == 1:\n            data_stamp = time_features(pd.to_datetime(df_stamp['date'].values), freq=self.freq)\n            data_stamp = data_stamp.transpose(1, 0)\n\n        self.data_x = data[border1:border2]\n        self.data_y = data[border1:border2]\n        self.data_stamp = data_stamp\n\n    def __getitem__(self, index):\n        s_begin = index\n        s_end = s_begin + self.seq_len\n        r_begin = s_end - self.label_len\n        r_end = r_begin + self.label_len + self.pred_len\n\n        seq_x = self.data_x[s_begin:s_end]\n        seq_y = self.data_y[r_begin:r_end]\n        seq_x_mark = self.data_stamp[s_begin:s_end]\n        seq_y_mark = self.data_stamp[r_begin:r_end]\n\n        return seq_x, seq_y, seq_x_mark, seq_y_mark\n\n    def __len__(self):\n        return len(self.data_x) - self.seq_len - self.pred_len + 1\n\n    def inverse_transform(self, data):\n        return self.scaler.inverse_transform(data)\n\n\nclass Dataset_Custom(Dataset):\n    def __init__(self, root_path, flag='train', size=None,\n                 features='S', data_path='ETTh1.csv',\n                 target='OT', scale=True, timeenc=0, freq='h'):\n        # size [seq_len, label_len, pred_len]\n        # info\n        if size == None:\n            self.seq_len = 24 * 4 * 4\n            self.label_len = 24 * 4\n            self.pred_len = 24 * 4\n        else:\n            self.seq_len = size[0]\n            self.label_len = size[1]\n            self.pred_len = size[2]\n        # init\n        assert flag in ['train', 'test', 'val']\n        type_map = {'train': 0, 'val': 1, 'test': 2}\n        self.set_type = type_map[flag]\n\n        self.features = features\n        self.target = 
target\n        self.scale = scale\n        self.timeenc = timeenc\n        self.freq = freq\n\n        self.root_path = root_path\n        self.data_path = data_path\n        self.__read_data__()\n\n    def __read_data__(self):\n        self.scaler = StandardScaler()\n        df_raw = pd.read_csv(os.path.join(self.root_path,\n                                          self.data_path))\n\n        '''\n        df_raw.columns: ['date', ...(other features), target feature]\n        '''\n        \n        num_train = int(len(df_raw) * 0.6)\n        num_vali = int(len(df_raw) * 0.2)\n        num_test = len(df_raw) - num_train - num_vali\n        border1s = [0, num_train - self.seq_len, len(df_raw) - num_test - self.seq_len]\n        border2s = [num_train, num_train + num_vali, len(df_raw)]\n        border1 = border1s[self.set_type]\n        border2 = border2s[self.set_type]\n\n        if self.features == 'M' or self.features == 'MS':\n            cols_data = df_raw.columns[1:]\n            df_data = df_raw[cols_data]\n        elif self.features == 'S':\n            df_data = df_raw[[self.target]]\n\n        if self.scale:\n            train_data = df_data[border1s[0]:border2s[0]]\n            self.scaler.fit(train_data.values)\n            data = self.scaler.transform(df_data.values)\n        else:\n            data = df_data.values\n\n        df_stamp = df_raw[['date']][border1:border2]\n        df_stamp['date'] = pd.to_datetime(df_stamp.date)\n        if self.timeenc == 0:\n            df_stamp['month'] = df_stamp.date.apply(lambda row: row.month, 1)\n            df_stamp['day'] = df_stamp.date.apply(lambda row: row.day, 1)\n            df_stamp['weekday'] = df_stamp.date.apply(lambda row: row.weekday(), 1)\n            df_stamp['hour'] = df_stamp.date.apply(lambda row: row.hour, 1)\n            data_stamp = df_stamp.drop(['date'], 1).values\n        elif self.timeenc == 1:\n            data_stamp = time_features(pd.to_datetime(df_stamp['date'].values), 
freq=self.freq)\n            data_stamp = data_stamp.transpose(1, 0)\n\n        self.data_x = data[border1:border2]\n        self.data_y = data[border1:border2]\n        self.data_stamp = data_stamp\n\n    def __getitem__(self, index):\n        s_begin = index\n        s_end = s_begin + self.seq_len\n        r_begin = s_end - self.label_len\n        r_end = r_begin + self.label_len + self.pred_len\n\n        seq_x = self.data_x[s_begin:s_end]\n        seq_y = self.data_y[r_begin:r_end]\n        seq_x_mark = self.data_stamp[s_begin:s_end]\n        seq_y_mark = self.data_stamp[r_begin:r_end]\n\n        return seq_x, seq_y, seq_x_mark, seq_y_mark\n\n    def __len__(self):\n        return len(self.data_x) - self.seq_len - self.pred_len + 1\n\n    def inverse_transform(self, data):\n        return self.scaler.inverse_transform(data)\n    \n\nclass Dataset_Pred(Dataset):\n    def __init__(self, root_path, flag='pred', size=None,\n                 features='S', data_path='ETTh1.csv',\n                 target='OT', scale=True, inverse=False, timeenc=0, freq='15min', cols=None):\n        # size [seq_len, label_len, pred_len]\n        # info\n        if size == None:\n            self.seq_len = 24 * 4 * 4\n            self.label_len = 24 * 4\n            self.pred_len = 24 * 4\n        else:\n            self.seq_len = size[0]\n            self.label_len = size[1]\n            self.pred_len = size[2]\n        # init\n        assert flag in ['pred']\n\n        self.features = features\n        self.target = target\n        self.scale = scale\n        self.inverse = inverse\n        self.timeenc = timeenc\n        self.freq = freq\n        self.cols = cols\n        self.root_path = root_path\n        self.data_path = data_path\n        self.__read_data__()\n\n    def __read_data__(self):\n        self.scaler = StandardScaler()\n        df_raw = pd.read_csv(os.path.join(self.root_path,\n                                          self.data_path))\n        '''\n        
df_raw.columns: ['date', ...(other features), target feature]\n        '''\n        if self.cols:\n            cols = self.cols.copy()\n            cols.remove(self.target)\n        else:\n            cols = list(df_raw.columns)\n            cols.remove(self.target)\n            cols.remove('date')\n        df_raw = df_raw[['date'] + cols + [self.target]]\n        border1 = len(df_raw) - self.seq_len\n        border2 = len(df_raw)\n\n        if self.features == 'M' or self.features == 'MS':\n            cols_data = df_raw.columns[1:]\n            df_data = df_raw[cols_data]\n        elif self.features == 'S':\n            df_data = df_raw[[self.target]]\n\n        if self.scale:\n            self.scaler.fit(df_data.values)\n            data = self.scaler.transform(df_data.values)\n        else:\n            data = df_data.values\n\n        tmp_stamp = df_raw[['date']][border1:border2]\n        tmp_stamp['date'] = pd.to_datetime(tmp_stamp.date)\n        pred_dates = pd.date_range(tmp_stamp.date.values[-1], periods=self.pred_len + 1, freq=self.freq)\n\n        df_stamp = pd.DataFrame(columns=['date'])\n        df_stamp.date = list(tmp_stamp.date.values) + list(pred_dates[1:])\n        if self.timeenc == 0:\n            df_stamp['month'] = df_stamp.date.apply(lambda row: row.month, 1)\n            df_stamp['day'] = df_stamp.date.apply(lambda row: row.day, 1)\n            df_stamp['weekday'] = df_stamp.date.apply(lambda row: row.weekday(), 1)\n            df_stamp['hour'] = df_stamp.date.apply(lambda row: row.hour, 1)\n            df_stamp['minute'] = df_stamp.date.apply(lambda row: row.minute, 1)\n            df_stamp['minute'] = df_stamp.minute.map(lambda x: x // 15)\n            data_stamp = df_stamp.drop(['date'], 1).values\n        elif self.timeenc == 1:\n            data_stamp = time_features(pd.to_datetime(df_stamp['date'].values), freq=self.freq)\n            data_stamp = data_stamp.transpose(1, 0)\n\n        self.data_x = data[border1:border2]\n        if 
self.inverse:\n            self.data_y = df_data.values[border1:border2]\n        else:\n            self.data_y = data[border1:border2]\n        self.data_stamp = data_stamp\n\n    def __getitem__(self, index):\n        s_begin = index\n        s_end = s_begin + self.seq_len\n        r_begin = s_end - self.label_len\n        r_end = r_begin + self.label_len + self.pred_len\n\n        seq_x = self.data_x[s_begin:s_end]\n        if self.inverse:\n            seq_y = self.data_x[r_begin:r_begin + self.label_len]\n        else:\n            seq_y = self.data_y[r_begin:r_begin + self.label_len]\n        seq_x_mark = self.data_stamp[s_begin:s_end]\n        seq_y_mark = self.data_stamp[r_begin:r_end]\n\n        return seq_x, seq_y, seq_x_mark, seq_y_mark\n\n    def __len__(self):\n        return len(self.data_x) - self.seq_len + 1\n\n    def inverse_transform(self, data):\n        return self.scaler.inverse_transform(data)\n"
  },
  {
    "path": "ts_forecasting_methods/SupervisedBaselines/environment.yml",
    "content": "name: autoformer\nchannels:\n  - conda-forge\n  - pytorch\n  - defaults\ndependencies:\n  - python=3.7\n  - pip\n  - matplotlib\n  - numpy\n  - pandas\n  - scikit-learn\n  - pip:\n    - torch==1.9.0\n"
  },
  {
    "path": "ts_forecasting_methods/SupervisedBaselines/exp/__init__.py",
    "content": ""
  },
  {
    "path": "ts_forecasting_methods/SupervisedBaselines/exp/exp_basic.py",
    "content": "import os\nimport torch\nimport numpy as np\n\n\nclass Exp_Basic(object):\n    def __init__(self, args):\n        self.args = args\n        self.device = self._acquire_device()\n        self.model = self._build_model().to(self.device)\n\n    def _build_model(self):\n        raise NotImplementedError\n        return None\n\n    def _acquire_device(self):\n        if self.args.use_gpu:\n            os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(\n                self.args.gpu) if not self.args.use_multi_gpu else self.args.devices\n            device = torch.device('cuda:{}'.format(self.args.gpu))\n            print('Use GPU: cuda:{}'.format(self.args.gpu))\n        else:\n            device = torch.device('cpu')\n            print('Use CPU')\n        return device\n\n    def _get_data(self):\n        pass\n\n    def vali(self):\n        pass\n\n    def train(self):\n        pass\n\n    def test(self):\n        pass\n"
  },
  {
    "path": "ts_forecasting_methods/SupervisedBaselines/exp/exp_informer.py",
    "content": "from data.data_loader import Dataset_ETT_hour, Dataset_ETT_minute, Dataset_Custom, Dataset_Pred, Dataset_Custom_1, Dataset_Custom_2, Dataset_Custom_NoTime, Dataset_Custom_NoTime_1, Dataset_Custom_NoTime_2,Dataset_Syn\nfrom exp.exp_basic import Exp_Basic\nfrom models.model import Informer, InformerStack\n\nfrom utils.tools import EarlyStopping, adjust_learning_rate\nfrom utils.metrics import metric\n\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nfrom torch import optim\nfrom torch.utils.data import DataLoader\n\nimport os\nimport time\nfrom lib.dataloader import get_dataloader\nfrom lib.metrics import All_Metrics\nimport warnings\nwarnings.filterwarnings('ignore')\n\nclass Exp_Informer(Exp_Basic):\n    def __init__(self, args):\n        super(Exp_Informer, self).__init__(args)\n    \n    def _build_model(self):\n        model_dict = {\n            'informer':Informer,\n            'informerstack':InformerStack,\n        }\n        if self.args.model=='informer' or self.args.model=='informerstack':\n            e_layers = self.args.e_layers if self.args.model=='informer' else self.args.s_layers\n            model = model_dict[self.args.model](\n                self.args.enc_in,\n                self.args.dec_in, \n                self.args.c_out, \n                self.args.seq_len, \n                self.args.label_len,\n                self.args.pred_len, \n                self.args.factor,\n                self.args.d_model, \n                self.args.n_heads, \n                e_layers, # self.args.e_layers,\n                self.args.d_layers, \n                self.args.d_ff,\n                self.args.chunk_num,\n                self.args.dropout, \n                self.args.attn,\n                self.args.embed,\n                self.args.freq,\n                self.args.activation,\n                self.args.output_attention,\n                self.args.distil,\n                self.device\n            ).float()\n        \n     
   if self.args.use_multi_gpu and self.args.use_gpu:\n            model = nn.DataParallel(model, device_ids=self.args.device_ids)\n        return model\n\n    def _select_optimizer(self):\n        model_optim = optim.Adam(self.model.parameters(), lr=self.args.learning_rate)\n        return model_optim\n\n    def _select_optimizer_p(self):\n        model_optim_p = optim.Adam([self.model.protos_q,self.model.protos_middle,self.model.protos_k], lr=self.args.learning_rate)\n        return model_optim_p\n\n    \n    def _select_criterion(self):\n        criterion =  nn.MSELoss()\n        return criterion\n\n    def vali(self, vali_loader, scaler,criterion):\n        self.model.eval()\n        total_loss = []\n        for i, (batch_x,batch_y,batch_x_mark,batch_y_mark) in enumerate(vali_loader):\n            if batch_x.ndim==4:\n                batch_x = batch_x.float().squeeze(3).to(self.device)\n                batch_y = batch_y.float().squeeze(3)\n            else:\n                batch_x = batch_x.float().to(self.device)\n                batch_y = batch_y.float()\n            \n            batch_x_mark = batch_x_mark.float().to(self.device)\n            batch_y_mark = batch_y_mark.float().to(self.device)\n\n            # decoder input\n            dec_inp = torch.zeros_like(batch_y[:,-self.args.pred_len:,:]).float()\n            dec_inp = torch.cat([batch_y[:,:self.args.label_len,:], dec_inp], dim=1).float().to(self.device)\n            # encoder - decoder\n            if self.args.use_amp:\n                print('hh')\n            else:\n                outputs, dtw_loss = self.model(i, batch_x, batch_x_mark, dec_inp, batch_y_mark)\n\n            f_dim = -1 if self.args.features=='MS' else 0\n            batch_y = batch_y[:,-self.args.pred_len:,f_dim:].to(self.device)\n\n            if self.args.real_value:\n                batch_y = scaler.inverse_transform(batch_y[:,-self.args.pred_len:,f_dim:].unsqueeze(3).to(self.device))\n            outputs = 
outputs.unsqueeze(3)\n            pred = outputs.detach().cpu()\n            true = batch_y.detach().cpu()\n            loss = criterion(pred, true) \n\n            total_loss.append(loss)\n        total_loss = np.average(total_loss)\n        self.model.train()\n        return total_loss\n        \n    def train(self, setting):\n        if self.args.traffic_flow:\n            train_loader, vali_loader, test_loader, scaler = get_dataloader(self.args, normalizer=self.args.normalizer,tod=self.args.tod, dow=False, weather=False, single=False)\n        else:\n            train_data, train_loader = self._get_data(flag = 'train')\n            vali_data, vali_loader = self._get_data(flag = 'val')\n            test_data, test_loader = self._get_data(flag = 'test')\n\n        path = os.path.join(self.args.checkpoints, setting)\n        if not os.path.exists(path):\n            os.makedirs(path)\n\n        time_now = time.time()\n        \n        train_steps = len(train_loader)\n        early_stopping = EarlyStopping(patience=self.args.patience, verbose=True)\n        \n        model_optim = self._select_optimizer()\n        model_optim_p = self._select_optimizer_p()\n        criterion =  self._select_criterion()\n\n        for epoch in range(self.args.train_epochs):\n            iter_count = 0\n            train_loss = []\n            \n            self.model.train()\n            epoch_time = time.time()\n            self.model.init_protos(train_loader)\n            \n            for i, (batch_x,batch_y,batch_x_mark,batch_y_mark) in enumerate(train_loader):\n                iter_count += 1\n                \n                model_optim.zero_grad()\n                model_optim_p.zero_grad()\n                \n                if batch_x.ndim==4:\n                    batch_x = batch_x.float().squeeze(3).to(self.device)\n                    batch_y = batch_y.float().squeeze(3)\n                else:\n                    batch_x = batch_x.float().to(self.device)\n                
    batch_y = batch_y.float()\n                \n                batch_x_mark = batch_x_mark.float().to(self.device)\n                batch_y_mark = batch_y_mark.float().to(self.device)\n\n                # decoder input\n                dec_inp = torch.zeros_like(batch_y[:,-self.args.pred_len:,:]).float()\n                dec_inp = torch.cat([batch_y[:,:self.args.label_len,:], dec_inp], dim=1).float().to(self.device)\n                \n                # encoder - decoder\n                if self.args.use_amp:\n                    print('hh')\n                else:\n                    outputs, dtw_loss = self.model(i, batch_x, batch_x_mark, dec_inp, batch_y_mark)\n                    f_dim = -1 if self.args.features=='MS' else 0\n                    if self.args.real_value:\n                        batch_y = scaler.inverse_transform(batch_y[:,-self.args.pred_len:,f_dim:].unsqueeze(3).to(self.device))\n                    outputs = outputs.unsqueeze(3)\n                    loss = criterion(outputs, batch_y)\n                    #loss += dtw_loss\n                    train_loss.append(loss.item())\n                \n                if (i+1) % 100==0:\n                    print(\"\\titers: {0}, epoch: {1} | loss: {2:.7f}\".format(i + 1, epoch + 1, loss.item()))\n                    speed = (time.time()-time_now)/iter_count\n                    left_time = speed*((self.args.train_epochs - epoch)*train_steps - i)\n                    print('\\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(speed, left_time))\n                    iter_count = 0\n                    time_now = time.time()\n                \n                if self.args.use_amp:\n                    print('hh')\n                else:\n                    loss.backward(retain_graph=True)\n                    dtw_loss.backward(retain_graph=True)\n                    model_optim.step()\n                    model_optim_p.step()\n\n            print(\"Epoch: {} cost time: {}\".format(epoch+1, 
time.time()-epoch_time))\n            train_loss = np.average(train_loss)\n            vali_loss = self.vali(vali_loader, scaler, criterion)\n            test_loss = self.vali(test_loader, scaler, criterion)\n\n            print(\"Epoch: {0}, Steps: {1} | Train Loss: {2:.7f} Vali Loss: {3:.7f} Test Loss: {4:.7f}\".format(\n                epoch + 1, train_steps, train_loss, vali_loss, test_loss))\n            early_stopping(vali_loss, self.model, path)\n            if early_stopping.early_stop:\n                print(\"Early stopping\")\n                break\n\n            adjust_learning_rate(model_optim, epoch+1, self.args)\n            adjust_learning_rate(model_optim_p, epoch+1, self.args)\n            \n        best_model_path = path+'/'+'checkpoint.pth'\n        self.model.load_state_dict(torch.load(best_model_path))\n        \n        return self.model\n\n    def test(self, setting):\n        train_loader, vali_loader, test_loader, scaler = get_dataloader(self.args, normalizer=self.args.normalizer,tod=self.args.tod, dow=False, weather=False, single=False)\n\n        path = os.path.join(self.args.checkpoints, setting)\n        best_model_path = path+'/'+'checkpoint.pth'\n        self.model.load_state_dict(torch.load(best_model_path))\n        \n        #-------------------\n        \n        self.model.eval()\n        \n        preds = None\n        trues = None\n        \n        for i, (batch_x,batch_y,batch_x_mark,batch_y_mark) in enumerate(test_loader):\n            if batch_x.ndim==4:\n                batch_x = batch_x.float().squeeze(3).to(self.device)\n                batch_y = batch_y.float().squeeze(3)\n            else:\n                batch_x = batch_x.float().to(self.device)\n                batch_y = batch_y.float()\n            batch_x_mark = batch_x_mark.float().to(self.device)\n            batch_y_mark = batch_y_mark.float().to(self.device)\n\n            # decoder input\n            dec_inp = 
torch.zeros_like(batch_y[:,-self.args.pred_len:,:]).float()\n            dec_inp = torch.cat([batch_y[:,:self.args.label_len,:], dec_inp], dim=1).float().to(self.device)\n            # encoder - decoder\n            if self.args.use_amp:\n                print('hh')\n            else:\n                outputs, dtw_loss = self.model(i, batch_x, batch_x_mark, dec_inp, batch_y_mark)\n\n            f_dim = -1 if self.args.features=='MS' else 0\n\n            true = batch_y[:,-self.args.pred_len:,f_dim:].unsqueeze(3).to(self.device)\n            pred = outputs.unsqueeze(3)\n\n            if preds is None:\n                preds = pred\n                trues = true\n            else:\n                preds = torch.cat((preds,pred))\n                trues = torch.cat((trues,true))\n\n        trues = scaler.inverse_transform(trues)\n        if self.args.real_value:\n            preds = preds\n        else:\n            preds = scaler.inverse_transform(preds)\n\n        preds = preds.detach().cpu().numpy()\n        trues = trues.detach().cpu().numpy()\n        print('test shape:', preds.shape, trues.shape)\n\n        # result save\n        folder_path = './results_ETT/' + setting +'/'\n        if not os.path.exists(folder_path):\n            os.makedirs(folder_path)\n\n        mae, rmse, mape, _, _ = All_Metrics(preds, trues, self.args.mae_thresh, self.args.mape_thresh)\n        print(\"Average Horizon, MAE: {:.2f}, RMSE: {:.2f}, MAPE: {:.4f}%\".format(mae, rmse, mape*100))\n        print('mae:{}, rmse:{}, mape:{}'.format(mae, rmse, mape))\n\n        np.save(folder_path+'metrics.npy', np.array([mae, rmse, mape]))\n        np.save(folder_path+'pred.npy', preds)\n        np.save(folder_path+'true.npy', trues)\n\n        return\n\n    def predict(self, setting, load=False):\n        pred_data, pred_loader = self._get_data(flag='pred')\n        \n        if load:\n            path = os.path.join(self.args.checkpoints, setting)\n            best_model_path = 
path+'/'+'checkpoint.pth'\n            self.model.load_state_dict(torch.load(best_model_path))\n\n        self.model.eval()\n        \n        preds = []\n        \n        for i, (batch_x,batch_y,batch_x_mark,batch_y_mark) in enumerate(pred_loader):\n            batch_x = batch_x.float().to(self.device)\n            batch_y = batch_y.float()\n            batch_x_mark = batch_x_mark.float().to(self.device)\n            batch_y_mark = batch_y_mark.float().to(self.device)\n\n            # decoder input\n            dec_inp = torch.zeros_like(batch_y[:,-self.args.pred_len:,:]).float()\n            dec_inp = torch.cat([batch_y[:,:self.args.label_len,:], dec_inp], dim=1).float().to(self.device)\n            # encoder - decoder\n            if self.args.use_amp:\n                with torch.cuda.amp.autocast():\n                    if self.args.output_attention:\n                        outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n                    else:\n                        outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n            else:\n                if self.args.output_attention:\n                    outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n                else:\n                    outputs = self.model(i, batch_x, batch_x_mark, dec_inp, batch_y_mark)\n\n            f_dim = -1 if self.args.features=='MS' else 0\n            batch_y = batch_y[:,-self.args.pred_len:,f_dim:].to(self.device)\n            \n            pred = outputs.detach().cpu().numpy()#.squeeze()\n            \n            preds.append(pred)\n\n        preds = np.array(preds)\n        preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1])\n        \n        # result save\n        folder_path = './results/' + setting +'/'\n        if not os.path.exists(folder_path):\n            os.makedirs(folder_path)\n        \n        np.save(folder_path+'real_prediction.npy', preds)\n        \n        return"
  },
  {
    "path": "ts_forecasting_methods/SupervisedBaselines/exp/exp_main.py",
    "content": "from data_provider.data_factory import data_provider\nfrom exp.exp_basic import Exp_Basic\nfrom models import Informer, Autoformer, Transformer, Reformer, TCN\nfrom models.transformer_xl import transformer_basic\nfrom models.transformer_xl import transformer_bottleneck\nfrom utils.tools import EarlyStopping, adjust_learning_rate, visual\nfrom utils.metrics import metric\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch import optim\n\nimport os\nimport time\n\nimport warnings\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nwarnings.filterwarnings('ignore')\n\n\nclass Exp_Main(Exp_Basic):\n    def __init__(self, args):\n        super(Exp_Main, self).__init__(args)\n\n    def _build_model(self):\n        model_dict = {\n            'Autoformer': Autoformer,\n            'Transformer': Transformer,\n            'Informer': Informer,\n            'Reformer': Reformer,\n            'TCN': TCN,\n            'TransformerBase':transformer_basic,\n            'LogTrans':transformer_bottleneck,\n        }\n        model = model_dict[self.args.model].Model(self.args).float()\n\n        if self.args.use_multi_gpu and self.args.use_gpu:\n            model = nn.DataParallel(model, device_ids=self.args.device_ids)\n        return model\n\n    def _get_data(self, flag):\n        data_set, data_loader = data_provider(self.args, flag)\n        return data_set, data_loader\n\n    def _select_optimizer(self):\n        model_optim = optim.Adam(self.model.parameters(), lr=self.args.learning_rate)\n        return model_optim\n\n    def _select_criterion(self):\n        criterion = nn.MSELoss()\n        return criterion\n\n    def vali(self, vali_data, vali_loader, criterion):\n        total_loss = []\n        self.model.eval()\n        with torch.no_grad():\n            for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(vali_loader):\n                batch_x = batch_x.float().to(self.device)\n                batch_y = 
batch_y.float()\n\n                batch_x_mark = batch_x_mark.float().to(self.device)\n                batch_y_mark = batch_y_mark.float().to(self.device)\n\n                # decoder input\n                dec_inp = torch.zeros_like(batch_y[:, -self.args.pred_len:, :]).float()\n                dec_inp = torch.cat([batch_y[:, :self.args.label_len, :], dec_inp], dim=1).float().to(self.device)\n                # encoder - decoder\n                if self.args.use_amp:\n                    with torch.cuda.amp.autocast():\n                        if self.args.output_attention:\n                            outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n                        else:\n                            outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n                else:\n                    if self.args.output_attention:\n                        outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n                    else:\n                        outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n                f_dim = -1 if self.args.features == 'MS' else 0\n                outputs = outputs[:, -self.args.pred_len:, f_dim:]\n                batch_y = batch_y[:, -self.args.pred_len:, f_dim:].to(self.device)\n\n                pred = outputs.detach().cpu()\n                true = batch_y.detach().cpu()\n\n                loss = criterion(pred, true)\n\n                total_loss.append(loss)\n        total_loss = np.average(total_loss)\n        self.model.train()\n        return total_loss\n\n    def train(self, setting):\n        train_data, train_loader = self._get_data(flag='train')\n        vali_data, vali_loader = self._get_data(flag='val')\n        test_data, test_loader = self._get_data(flag='test')\n\n        path = os.path.join(self.args.checkpoints, setting)\n        if not os.path.exists(path):\n            os.makedirs(path)\n\n        time_now = time.time()\n\n        
train_steps = len(train_loader)\n        early_stopping = EarlyStopping(patience=self.args.patience, verbose=True)\n\n        model_optim = self._select_optimizer()\n        criterion = self._select_criterion()\n\n        if self.args.use_amp:\n            scaler = torch.cuda.amp.GradScaler()\n\n        for epoch in range(self.args.train_epochs):\n            iter_count = 0\n            train_loss = []\n\n            self.model.train()\n            epoch_time = time.time()\n            for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(train_loader):\n                iter_count += 1\n                model_optim.zero_grad()\n                batch_x = batch_x.float().to(self.device)\n\n                batch_y = batch_y.float().to(self.device)\n                batch_x_mark = batch_x_mark.float().to(self.device)\n                batch_y_mark = batch_y_mark.float().to(self.device)\n\n                # decoder input\n                dec_inp = torch.zeros_like(batch_y[:, -self.args.pred_len:, :]).float()\n                dec_inp = torch.cat([batch_y[:, :self.args.label_len, :], dec_inp], dim=1).float().to(self.device)\n\n                # encoder - decoder\n                if self.args.use_amp:\n                    with torch.cuda.amp.autocast():\n                        if self.args.output_attention:\n                            outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n                        else:\n                            outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n\n                        f_dim = -1 if self.args.features == 'MS' else 0\n                        outputs = outputs[:, -self.args.pred_len:, f_dim:]\n                        batch_y = batch_y[:, -self.args.pred_len:, f_dim:].to(self.device)\n                        loss = criterion(outputs, batch_y)\n                        train_loss.append(loss.item())\n                else:\n                    if self.args.output_attention:\n       
                 outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n                    else:\n                        outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark, batch_y)\n\n                    f_dim = -1 if self.args.features == 'MS' else 0\n                    outputs = outputs[:, -self.args.pred_len:, f_dim:]\n                    batch_y = batch_y[:, -self.args.pred_len:, f_dim:].to(self.device)\n                    loss = criterion(outputs, batch_y)\n                    train_loss.append(loss.item())\n\n                if (i + 1) % 100 == 0:\n                    print(\"\\titers: {0}, epoch: {1} | loss: {2:.7f}\".format(i + 1, epoch + 1, loss.item()))\n                    speed = (time.time() - time_now) / iter_count\n                    left_time = speed * ((self.args.train_epochs - epoch) * train_steps - i)\n                    print('\\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(speed, left_time))\n                    iter_count = 0\n                    time_now = time.time()\n\n                if self.args.use_amp:\n                    scaler.scale(loss).backward()\n                    scaler.step(model_optim)\n                    scaler.update()\n                else:\n                    loss.backward()\n                    model_optim.step()\n\n            print(\"Epoch: {} cost time: {}\".format(epoch + 1, time.time() - epoch_time))\n            train_loss = np.average(train_loss)\n            vali_loss = self.vali(vali_data, vali_loader, criterion)\n            test_loss = self.vali(test_data, test_loader, criterion)\n\n            print(\"Epoch: {0}, Steps: {1} | Train Loss: {2:.7f} Vali Loss: {3:.7f} Test Loss: {4:.7f}\".format(\n                epoch + 1, train_steps, train_loss, vali_loss, test_loss))\n            early_stopping(vali_loss, self.model, path)\n            if early_stopping.early_stop:\n                print(\"Early stopping\")\n                break\n\n            
adjust_learning_rate(model_optim, epoch + 1, self.args)\n\n        best_model_path = path + '/' + 'checkpoint.pth'\n        self.model.load_state_dict(torch.load(best_model_path))\n\n        return self.model\n\n    def test(self, setting, test=0):\n        test_data, test_loader = self._get_data(flag='test')\n        if test:\n            print('loading model')\n            self.model.load_state_dict(torch.load(os.path.join('./checkpoints/' + setting, 'checkpoint.pth')))\n\n        preds = []\n        trues = []\n        folder_path = './test_results/' + setting + '/'\n        if not os.path.exists(folder_path):\n            os.makedirs(folder_path)\n\n        self.model.eval()\n        with torch.no_grad():\n            for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(test_loader):\n                batch_x = batch_x.float().to(self.device)\n                batch_y = batch_y.float().to(self.device)\n\n                batch_x_mark = batch_x_mark.float().to(self.device)\n                batch_y_mark = batch_y_mark.float().to(self.device)\n\n                # decoder input\n                dec_inp = torch.zeros_like(batch_y[:, -self.args.pred_len:, :]).float()\n                dec_inp = torch.cat([batch_y[:, :self.args.label_len, :], dec_inp], dim=1).float().to(self.device)\n                # encoder - decoder\n                if self.args.use_amp:\n                    with torch.cuda.amp.autocast():\n                        if self.args.output_attention:\n                            outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n                        else:\n                            outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n                else:\n                    if self.args.output_attention:\n                        outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n\n                    else:\n                        outputs = self.model(batch_x, batch_x_mark, dec_inp, 
batch_y_mark)\n\n                f_dim = -1 if self.args.features == 'MS' else 0\n                outputs = outputs[:, -self.args.pred_len:, f_dim:]\n                batch_y = batch_y[:, -self.args.pred_len:, f_dim:].to(self.device)\n                outputs = outputs.detach().cpu().numpy()\n                batch_y = batch_y.detach().cpu().numpy()\n\n                pred = outputs  # outputs.detach().cpu().numpy()  # .squeeze()\n                true = batch_y  # batch_y.detach().cpu().numpy()  # .squeeze()\n\n                preds.append(pred)\n                trues.append(true)\n                if i % 20 == 0:\n                    input = batch_x.detach().cpu().numpy()\n                    gt = np.concatenate((input[0, :, -1], true[0, :, -1]), axis=0)\n                    pd = np.concatenate((input[0, :, -1], pred[0, :, -1]), axis=0)\n                    visual(gt, pd, os.path.join(folder_path, str(i) + '.pdf'))\n\n        preds = np.array(preds)\n        trues = np.array(trues)\n        print('test shape:', preds.shape, trues.shape)\n        preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1])\n        trues = trues.reshape(-1, trues.shape[-2], trues.shape[-1])\n        print('test shape:', preds.shape, trues.shape)\n\n        # result save\n        folder_path = './results/' + setting + '/'\n        if not os.path.exists(folder_path):\n            os.makedirs(folder_path)\n\n        mae, mse, rmse, mape, mspe = metric(preds, trues)\n        print('mse:{}, mae:{}'.format(mse, mae))\n        f = open(\"result.txt\", 'a')\n        f.write(setting + \"  \\n\")\n        f.write('mse:{}, mae:{}'.format(mse, mae))\n        f.write('\\n')\n        f.write('\\n')\n        f.close()\n\n        np.save(folder_path + 'metrics.npy', np.array([mae, mse, rmse, mape, mspe]))\n        np.save(folder_path + 'pred.npy', preds)\n        np.save(folder_path + 'true.npy', trues)\n\n        return\n\n    def predict(self, setting, load=False):\n        pred_data, pred_loader = 
self._get_data(flag='pred')\n\n        if load:\n            path = os.path.join(self.args.checkpoints, setting)\n            best_model_path = path + '/' + 'checkpoint.pth'\n            self.model.load_state_dict(torch.load(best_model_path))\n\n        preds = []\n\n        self.model.eval()\n        with torch.no_grad():\n            for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(pred_loader):\n                batch_x = batch_x.float().to(self.device)\n                batch_y = batch_y.float()\n                batch_x_mark = batch_x_mark.float().to(self.device)\n                batch_y_mark = batch_y_mark.float().to(self.device)\n\n                # decoder input\n                dec_inp = torch.zeros([batch_y.shape[0], self.args.pred_len, batch_y.shape[2]]).float()\n                dec_inp = torch.cat([batch_y[:, :self.args.label_len, :], dec_inp], dim=1).float().to(self.device)\n                # encoder - decoder\n                if self.args.use_amp:\n                    with torch.cuda.amp.autocast():\n                        if self.args.output_attention:\n                            outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n                        else:\n                            outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n                else:\n                    if self.args.output_attention:\n                        outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n                    else:\n                        outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n                pred = outputs.detach().cpu().numpy()  # .squeeze()\n                preds.append(pred)\n\n        preds = np.array(preds)\n        preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1])\n\n        # result save\n        folder_path = './results/' + setting + '/'\n        if not os.path.exists(folder_path):\n            os.makedirs(folder_path)\n\n        
np.save(folder_path + 'real_prediction.npy', preds)\n\n        return"
  },
  {
    "path": "ts_forecasting_methods/SupervisedBaselines/layers/AutoCorrelation.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\nfrom math import sqrt\nimport os\n\n\nclass AutoCorrelation(nn.Module):\n    \"\"\"\n    AutoCorrelation Mechanism with the following two phases:\n    (1) period-based dependencies discovery\n    (2) time delay aggregation\n    This block can replace the self-attention family mechanism seamlessly.\n    \"\"\"\n    def __init__(self, mask_flag=True, factor=1, scale=None, attention_dropout=0.1, output_attention=False):\n        super(AutoCorrelation, self).__init__()\n        self.factor = factor\n        self.scale = scale\n        self.mask_flag = mask_flag\n        self.output_attention = output_attention\n        self.dropout = nn.Dropout(attention_dropout)\n\n    def time_delay_agg_training(self, values, corr):\n        \"\"\"\n        SpeedUp version of Autocorrelation (a batch-normalization style design)\n        This is for the training phase.\n        \"\"\"\n        head = values.shape[1]\n        channel = values.shape[2]\n        length = values.shape[3]\n        # find top k\n        top_k = int(self.factor * math.log(length))\n        mean_value = torch.mean(torch.mean(corr, dim=1), dim=1)\n        index = torch.topk(torch.mean(mean_value, dim=0), top_k, dim=-1)[1]\n        weights = torch.stack([mean_value[:, index[i]] for i in range(top_k)], dim=-1)\n        # update corr\n        tmp_corr = torch.softmax(weights, dim=-1)\n        # aggregation\n        tmp_values = values\n        delays_agg = torch.zeros_like(values).float()\n        for i in range(top_k):\n            pattern = torch.roll(tmp_values, -int(index[i]), -1)\n            delays_agg = delays_agg + pattern * \\\n                         (tmp_corr[:, i].unsqueeze(1).unsqueeze(1).unsqueeze(1).repeat(1, head, channel, length))\n        return delays_agg\n\n    def time_delay_agg_inference(self, values, corr):\n        \"\"\"\n        SpeedUp 
version of Autocorrelation (a batch-normalization style design)\n        This is for the inference phase.\n        \"\"\"\n        batch = values.shape[0]\n        head = values.shape[1]\n        channel = values.shape[2]\n        length = values.shape[3]\n        # index init\n        init_index = torch.arange(length).unsqueeze(0).unsqueeze(0).unsqueeze(0).repeat(batch, head, channel, 1).cuda()\n        # find top k\n        top_k = int(self.factor * math.log(length))\n        mean_value = torch.mean(torch.mean(corr, dim=1), dim=1)\n        weights, delay = torch.topk(mean_value, top_k, dim=-1)\n        # update corr\n        tmp_corr = torch.softmax(weights, dim=-1)\n        # aggregation\n        tmp_values = values.repeat(1, 1, 1, 2)\n        delays_agg = torch.zeros_like(values).float()\n        for i in range(top_k):\n            tmp_delay = init_index + delay[:, i].unsqueeze(1).unsqueeze(1).unsqueeze(1).repeat(1, head, channel, length)\n            pattern = torch.gather(tmp_values, dim=-1, index=tmp_delay)\n            delays_agg = delays_agg + pattern * \\\n                         (tmp_corr[:, i].unsqueeze(1).unsqueeze(1).unsqueeze(1).repeat(1, head, channel, length))\n        return delays_agg\n\n    def time_delay_agg_full(self, values, corr):\n        \"\"\"\n        Standard version of Autocorrelation\n        \"\"\"\n        batch = values.shape[0]\n        head = values.shape[1]\n        channel = values.shape[2]\n        length = values.shape[3]\n        # index init\n        init_index = torch.arange(length).unsqueeze(0).unsqueeze(0).unsqueeze(0).repeat(batch, head, channel, 1).cuda()\n        # find top k\n        top_k = int(self.factor * math.log(length))\n        weights, delay = torch.topk(corr, top_k, dim=-1)\n        # update corr\n        tmp_corr = torch.softmax(weights, dim=-1)\n        # aggregation\n        tmp_values = values.repeat(1, 1, 1, 2)\n        delays_agg = torch.zeros_like(values).float()\n        for i in range(top_k):\n    
        tmp_delay = init_index + delay[..., i].unsqueeze(-1)\n            pattern = torch.gather(tmp_values, dim=-1, index=tmp_delay)\n            delays_agg = delays_agg + pattern * (tmp_corr[..., i].unsqueeze(-1))\n        return delays_agg\n\n    def forward(self, queries, keys, values, attn_mask):\n        B, L, H, E = queries.shape\n        _, S, _, D = values.shape\n        if L > S:\n            zeros = torch.zeros_like(queries[:, :(L - S), :]).float()\n            values = torch.cat([values, zeros], dim=1)\n            keys = torch.cat([keys, zeros], dim=1)\n        else:\n            values = values[:, :L, :, :]\n            keys = keys[:, :L, :, :]\n\n        # period-based dependencies\n        q_fft = torch.fft.rfft(queries.permute(0, 2, 3, 1).contiguous(), dim=-1)\n        k_fft = torch.fft.rfft(keys.permute(0, 2, 3, 1).contiguous(), dim=-1)\n        res = q_fft * torch.conj(k_fft)\n        corr = torch.fft.irfft(res, dim=-1)\n\n        # time delay agg\n        if self.training:\n            V = self.time_delay_agg_training(values.permute(0, 2, 3, 1).contiguous(), corr).permute(0, 3, 1, 2)\n        else:\n            V = self.time_delay_agg_inference(values.permute(0, 2, 3, 1).contiguous(), corr).permute(0, 3, 1, 2)\n\n        if self.output_attention:\n            return (V.contiguous(), corr.permute(0, 3, 1, 2))\n        else:\n            return (V.contiguous(), None)\n\n\nclass AutoCorrelationLayer(nn.Module):\n    def __init__(self, correlation, d_model, n_heads, d_keys=None,\n                 d_values=None):\n        super(AutoCorrelationLayer, self).__init__()\n\n        d_keys = d_keys or (d_model // n_heads)\n        d_values = d_values or (d_model // n_heads)\n\n        self.inner_correlation = correlation\n        self.query_projection = nn.Linear(d_model, d_keys * n_heads)\n        self.key_projection = nn.Linear(d_model, d_keys * n_heads)\n        self.value_projection = nn.Linear(d_model, d_values * n_heads)\n        self.out_projection 
= nn.Linear(d_values * n_heads, d_model)\n        self.n_heads = n_heads\n\n    def forward(self, queries, keys, values, attn_mask):\n        B, L, _ = queries.shape\n        _, S, _ = keys.shape\n        H = self.n_heads\n\n        queries = self.query_projection(queries).view(B, L, H, -1)\n        keys = self.key_projection(keys).view(B, S, H, -1)\n        values = self.value_projection(values).view(B, S, H, -1)\n\n        out, attn = self.inner_correlation(\n            queries,\n            keys,\n            values,\n            attn_mask\n        )\n        out = out.view(B, L, -1)\n\n        return self.out_projection(out), attn\n"
  },
  {
    "path": "ts_forecasting_methods/SupervisedBaselines/layers/Autoformer_EncDec.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass my_Layernorm(nn.Module):\n    \"\"\"\n    Special designed layernorm for the seasonal part\n    \"\"\"\n    def __init__(self, channels):\n        super(my_Layernorm, self).__init__()\n        self.layernorm = nn.LayerNorm(channels)\n\n    def forward(self, x):\n        x_hat = self.layernorm(x)\n        bias = torch.mean(x_hat, dim=1).unsqueeze(1).repeat(1, x.shape[1], 1)\n        return x_hat - bias\n\n\nclass moving_avg(nn.Module):\n    \"\"\"\n    Moving average block to highlight the trend of time series\n    \"\"\"\n    def __init__(self, kernel_size, stride):\n        super(moving_avg, self).__init__()\n        self.kernel_size = kernel_size\n        self.avg = nn.AvgPool1d(kernel_size=kernel_size, stride=stride, padding=0)\n\n    def forward(self, x):\n        # padding on the both ends of time series\n        front = x[:, 0:1, :].repeat(1, (self.kernel_size - 1) // 2, 1)\n        end = x[:, -1:, :].repeat(1, (self.kernel_size - 1) // 2, 1)\n        x = torch.cat([front, x, end], dim=1)\n        x = self.avg(x.permute(0, 2, 1))\n        x = x.permute(0, 2, 1)\n        return x\n\n\nclass series_decomp(nn.Module):\n    \"\"\"\n    Series decomposition block\n    \"\"\"\n    def __init__(self, kernel_size):\n        super(series_decomp, self).__init__()\n        self.moving_avg = moving_avg(kernel_size, stride=1)\n\n    def forward(self, x):\n        moving_mean = self.moving_avg(x)\n        res = x - moving_mean\n        return res, moving_mean\n\n\nclass EncoderLayer(nn.Module):\n    \"\"\"\n    Autoformer encoder layer with the progressive decomposition architecture\n    \"\"\"\n    def __init__(self, attention, d_model, d_ff=None, moving_avg=25, dropout=0.1, activation=\"relu\"):\n        super(EncoderLayer, self).__init__()\n        d_ff = d_ff or 4 * d_model\n        self.attention = attention\n        self.conv1 = nn.Conv1d(in_channels=d_model, 
out_channels=d_ff, kernel_size=1, bias=False)\n        self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1, bias=False)\n        self.decomp1 = series_decomp(moving_avg)\n        self.decomp2 = series_decomp(moving_avg)\n        self.dropout = nn.Dropout(dropout)\n        self.activation = F.relu if activation == \"relu\" else F.gelu\n\n    def forward(self, x, attn_mask=None):\n        new_x, attn = self.attention(\n            x, x, x,\n            attn_mask=attn_mask\n        )\n        x = x + self.dropout(new_x)\n        x, _ = self.decomp1(x)\n        y = x\n        y = self.dropout(self.activation(self.conv1(y.transpose(-1, 1))))\n        y = self.dropout(self.conv2(y).transpose(-1, 1))\n        res, _ = self.decomp2(x + y)\n        return res, attn\n\n\nclass Encoder(nn.Module):\n    \"\"\"\n    Autoformer encoder\n    \"\"\"\n    def __init__(self, attn_layers, conv_layers=None, norm_layer=None):\n        super(Encoder, self).__init__()\n        self.attn_layers = nn.ModuleList(attn_layers)\n        self.conv_layers = nn.ModuleList(conv_layers) if conv_layers is not None else None\n        self.norm = norm_layer\n\n    def forward(self, x, attn_mask=None):\n        attns = []\n        if self.conv_layers is not None:\n            for attn_layer, conv_layer in zip(self.attn_layers, self.conv_layers):\n                x, attn = attn_layer(x, attn_mask=attn_mask)\n                x = conv_layer(x)\n                attns.append(attn)\n            x, attn = self.attn_layers[-1](x)\n            attns.append(attn)\n        else:\n            for attn_layer in self.attn_layers:\n                x, attn = attn_layer(x, attn_mask=attn_mask)\n                attns.append(attn)\n\n        if self.norm is not None:\n            x = self.norm(x)\n\n        return x, attns\n\n\nclass DecoderLayer(nn.Module):\n    \"\"\"\n    Autoformer decoder layer with the progressive decomposition architecture\n    \"\"\"\n    def __init__(self, 
self_attention, cross_attention, d_model, c_out, d_ff=None,\n                 moving_avg=25, dropout=0.1, activation=\"relu\"):\n        super(DecoderLayer, self).__init__()\n        d_ff = d_ff or 4 * d_model\n        self.self_attention = self_attention\n        self.cross_attention = cross_attention\n        self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1, bias=False)\n        self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1, bias=False)\n        self.decomp1 = series_decomp(moving_avg)\n        self.decomp2 = series_decomp(moving_avg)\n        self.decomp3 = series_decomp(moving_avg)\n        self.dropout = nn.Dropout(dropout)\n        self.projection = nn.Conv1d(in_channels=d_model, out_channels=c_out, kernel_size=3, stride=1, padding=1,\n                                    padding_mode='circular', bias=False)\n        self.activation = F.relu if activation == \"relu\" else F.gelu\n\n    def forward(self, x, cross, x_mask=None, cross_mask=None):\n        x = x + self.dropout(self.self_attention(\n            x, x, x,\n            attn_mask=x_mask\n        )[0])\n        x, trend1 = self.decomp1(x)\n        x = x + self.dropout(self.cross_attention(\n            x, cross, cross,\n            attn_mask=cross_mask\n        )[0])\n        x, trend2 = self.decomp2(x)\n        y = x\n        y = self.dropout(self.activation(self.conv1(y.transpose(-1, 1))))\n        y = self.dropout(self.conv2(y).transpose(-1, 1))\n        x, trend3 = self.decomp3(x + y)\n\n        residual_trend = trend1 + trend2 + trend3\n        residual_trend = self.projection(residual_trend.permute(0, 2, 1)).transpose(1, 2)\n        return x, residual_trend\n\n\nclass Decoder(nn.Module):\n    \"\"\"\n    Autoformer encoder\n    \"\"\"\n    def __init__(self, layers, norm_layer=None, projection=None):\n        super(Decoder, self).__init__()\n        self.layers = nn.ModuleList(layers)\n        self.norm = norm_layer\n        
self.projection = projection\n\n    def forward(self, x, cross, x_mask=None, cross_mask=None, trend=None):\n        for layer in self.layers:\n            x, residual_trend = layer(x, cross, x_mask=x_mask, cross_mask=cross_mask)\n            trend = trend + residual_trend\n\n        if self.norm is not None:\n            x = self.norm(x)\n\n        if self.projection is not None:\n            x = self.projection(x)\n        return x, trend\n"
  },
  {
    "path": "ts_forecasting_methods/SupervisedBaselines/layers/Embed.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.utils import weight_norm\nimport math\n\n\nclass PositionalEmbedding(nn.Module):\n    def __init__(self, d_model, max_len=5000):\n        super(PositionalEmbedding, self).__init__()\n        # Compute the positional encodings once in log space.\n        pe = torch.zeros(max_len, d_model).float()\n        pe.require_grad = False\n\n        position = torch.arange(0, max_len).float().unsqueeze(1)\n        div_term = (torch.arange(0, d_model, 2).float() * -(math.log(10000.0) / d_model)).exp()\n\n        pe[:, 0::2] = torch.sin(position * div_term)\n        pe[:, 1::2] = torch.cos(position * div_term)\n\n        pe = pe.unsqueeze(0)\n        self.register_buffer('pe', pe)\n\n    def forward(self, x):\n        return self.pe[:, :x.size(1)]\n\n\nclass TokenEmbedding(nn.Module):\n    def __init__(self, c_in, d_model):\n        super(TokenEmbedding, self).__init__()\n        padding = 1 if torch.__version__ >= '1.5.0' else 2\n        self.tokenConv = nn.Conv1d(in_channels=c_in, out_channels=d_model,\n                                   kernel_size=3, padding=padding, padding_mode='circular', bias=False)\n        for m in self.modules():\n            if isinstance(m, nn.Conv1d):\n                nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='leaky_relu')\n\n    def forward(self, x):\n        x = self.tokenConv(x.permute(0, 2, 1)).transpose(1, 2)\n        return x\n\n\nclass FixedEmbedding(nn.Module):\n    def __init__(self, c_in, d_model):\n        super(FixedEmbedding, self).__init__()\n\n        w = torch.zeros(c_in, d_model).float()\n        w.require_grad = False\n\n        position = torch.arange(0, c_in).float().unsqueeze(1)\n        div_term = (torch.arange(0, d_model, 2).float() * -(math.log(10000.0) / d_model)).exp()\n\n        w[:, 0::2] = torch.sin(position * div_term)\n        w[:, 1::2] = torch.cos(position * div_term)\n\n        self.emb = 
nn.Embedding(c_in, d_model)\n        self.emb.weight = nn.Parameter(w, requires_grad=False)\n\n    def forward(self, x):\n        return self.emb(x).detach()\n\n\nclass TemporalEmbedding(nn.Module):\n    def __init__(self, d_model, embed_type='fixed', freq='h'):\n        super(TemporalEmbedding, self).__init__()\n\n        minute_size = 4\n        hour_size = 24\n        weekday_size = 7\n        day_size = 32\n        month_size = 13\n\n        Embed = FixedEmbedding if embed_type == 'fixed' else nn.Embedding\n        if freq == 't':\n            self.minute_embed = Embed(minute_size, d_model)\n        self.hour_embed = Embed(hour_size, d_model)\n        self.weekday_embed = Embed(weekday_size, d_model)\n        self.day_embed = Embed(day_size, d_model)\n        self.month_embed = Embed(month_size, d_model)\n\n    def forward(self, x):\n        x = x.long()\n\n        minute_x = self.minute_embed(x[:, :, 4]) if hasattr(self, 'minute_embed') else 0.\n        hour_x = self.hour_embed(x[:, :, 3])\n        weekday_x = self.weekday_embed(x[:, :, 2])\n        day_x = self.day_embed(x[:, :, 1])\n        month_x = self.month_embed(x[:, :, 0])\n\n        return hour_x + weekday_x + day_x + month_x + minute_x\n\n\nclass TimeFeatureEmbedding(nn.Module):\n    def __init__(self, d_model, embed_type='timeF', freq='h'):\n        super(TimeFeatureEmbedding, self).__init__()\n\n        freq_map = {'h': 4, 't': 5, 's': 6, 'm': 1, 'a': 1, 'w': 2, 'd': 3, 'b': 3}\n        d_inp = freq_map[freq]\n        self.embed = nn.Linear(d_inp, d_model, bias=False)\n\n    def forward(self, x):\n        return self.embed(x)\n\n\nclass DataEmbedding(nn.Module):\n    def __init__(self, c_in, d_model, embed_type='fixed', freq='h', dropout=0.1):\n        super(DataEmbedding, self).__init__()\n\n        self.value_embedding = TokenEmbedding(c_in=c_in, d_model=d_model)\n        self.position_embedding = PositionalEmbedding(d_model=d_model)\n        self.temporal_embedding = 
TemporalEmbedding(d_model=d_model, embed_type=embed_type,\n                                                    freq=freq) if embed_type != 'timeF' else TimeFeatureEmbedding(\n            d_model=d_model, embed_type=embed_type, freq=freq)\n        self.dropout = nn.Dropout(p=dropout)\n\n    def forward(self, x, x_mark):\n        x = self.value_embedding(x) + self.temporal_embedding(x_mark) + self.position_embedding(x)\n        return self.dropout(x)\n\n\nclass DataEmbedding_wo_pos(nn.Module):\n    def __init__(self, c_in, d_model, embed_type='fixed', freq='h', dropout=0.1):\n        super(DataEmbedding_wo_pos, self).__init__()\n\n        self.value_embedding = TokenEmbedding(c_in=c_in, d_model=d_model)\n        self.position_embedding = PositionalEmbedding(d_model=d_model)\n        self.temporal_embedding = TemporalEmbedding(d_model=d_model, embed_type=embed_type,\n                                                    freq=freq) if embed_type != 'timeF' else TimeFeatureEmbedding(\n            d_model=d_model, embed_type=embed_type, freq=freq)\n        self.dropout = nn.Dropout(p=dropout)\n\n    def forward(self, x, x_mark):\n        x = self.value_embedding(x) + self.temporal_embedding(x_mark)\n        return self.dropout(x)\n"
  },
  {
    "path": "ts_forecasting_methods/SupervisedBaselines/layers/SelfAttention_Family.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport matplotlib.pyplot as plt\n\nimport numpy as np\nimport math\nfrom math import sqrt\nfrom utils.masking import TriangularCausalMask, ProbMask\nfrom reformer_pytorch import LSHSelfAttention\nimport os\n\n\nclass FullAttention(nn.Module):\n    def __init__(self, mask_flag=True, factor=5, scale=None, attention_dropout=0.1, output_attention=False):\n        super(FullAttention, self).__init__()\n        self.scale = scale\n        self.mask_flag = mask_flag\n        self.output_attention = output_attention\n        self.dropout = nn.Dropout(attention_dropout)\n\n    def forward(self, queries, keys, values, attn_mask):\n        B, L, H, E = queries.shape\n        _, S, _, D = values.shape\n        scale = self.scale or 1. / sqrt(E)\n\n        scores = torch.einsum(\"blhe,bshe->bhls\", queries, keys)\n\n        if self.mask_flag:\n            if attn_mask is None:\n                attn_mask = TriangularCausalMask(B, L, device=queries.device)\n\n            scores.masked_fill_(attn_mask.mask, -np.inf)\n\n        A = self.dropout(torch.softmax(scale * scores, dim=-1))\n        V = torch.einsum(\"bhls,bshd->blhd\", A, values)\n\n        if self.output_attention:\n            return (V.contiguous(), A)\n        else:\n            return (V.contiguous(), None)\n\n\nclass ProbAttention(nn.Module):\n    def __init__(self, mask_flag=True, factor=5, scale=None, attention_dropout=0.1, output_attention=False):\n        super(ProbAttention, self).__init__()\n        self.factor = factor\n        self.scale = scale\n        self.mask_flag = mask_flag\n        self.output_attention = output_attention\n        self.dropout = nn.Dropout(attention_dropout)\n\n    def _prob_QK(self, Q, K, sample_k, n_top):  # n_top: c*ln(L_q)\n        # Q [B, H, L, D]\n        B, H, L_K, E = K.shape\n        _, _, L_Q, _ = Q.shape\n\n        # calculate the sampled Q_K\n        K_expand = 
K.unsqueeze(-3).expand(B, H, L_Q, L_K, E)\n        index_sample = torch.randint(L_K, (L_Q, sample_k))  # real U = U_part(factor*ln(L_k))*L_q\n        K_sample = K_expand[:, :, torch.arange(L_Q).unsqueeze(1), index_sample, :]\n        Q_K_sample = torch.matmul(Q.unsqueeze(-2), K_sample.transpose(-2, -1)).squeeze()\n\n        # find the Top_k query with sparisty measurement\n        M = Q_K_sample.max(-1)[0] - torch.div(Q_K_sample.sum(-1), L_K)\n        M_top = M.topk(n_top, sorted=False)[1]\n\n        # use the reduced Q to calculate Q_K\n        Q_reduce = Q[torch.arange(B)[:, None, None],\n                   torch.arange(H)[None, :, None],\n                   M_top, :]  # factor*ln(L_q)\n        Q_K = torch.matmul(Q_reduce, K.transpose(-2, -1))  # factor*ln(L_q)*L_k\n\n        return Q_K, M_top\n\n    def _get_initial_context(self, V, L_Q):\n        B, H, L_V, D = V.shape\n        if not self.mask_flag:\n            # V_sum = V.sum(dim=-2)\n            V_sum = V.mean(dim=-2)\n            contex = V_sum.unsqueeze(-2).expand(B, H, L_Q, V_sum.shape[-1]).clone()\n        else:  # use mask\n            assert (L_Q == L_V)  # requires that L_Q == L_V, i.e. 
for self-attention only\n            contex = V.cumsum(dim=-2)\n        return contex\n\n    def _update_context(self, context_in, V, scores, index, L_Q, attn_mask):\n        B, H, L_V, D = V.shape\n\n        if self.mask_flag:\n            attn_mask = ProbMask(B, H, L_Q, index, scores, device=V.device)\n            scores.masked_fill_(attn_mask.mask, -np.inf)\n\n        attn = torch.softmax(scores, dim=-1)  # nn.Softmax(dim=-1)(scores)\n\n        context_in[torch.arange(B)[:, None, None],\n        torch.arange(H)[None, :, None],\n        index, :] = torch.matmul(attn, V).type_as(context_in)\n        if self.output_attention:\n            attns = (torch.ones([B, H, L_V, L_V]) / L_V).type_as(attn).to(attn.device)\n            attns[torch.arange(B)[:, None, None], torch.arange(H)[None, :, None], index, :] = attn\n            return (context_in, attns)\n        else:\n            return (context_in, None)\n\n    def forward(self, queries, keys, values, attn_mask):\n        B, L_Q, H, D = queries.shape\n        _, L_K, _, _ = keys.shape\n\n        queries = queries.transpose(2, 1)\n        keys = keys.transpose(2, 1)\n        values = values.transpose(2, 1)\n\n        U_part = self.factor * np.ceil(np.log(L_K)).astype('int').item()  # c*ln(L_k)\n        u = self.factor * np.ceil(np.log(L_Q)).astype('int').item()  # c*ln(L_q)\n\n        U_part = U_part if U_part < L_K else L_K\n        u = u if u < L_Q else L_Q\n\n        scores_top, index = self._prob_QK(queries, keys, sample_k=U_part, n_top=u)\n\n        # add scale factor\n        scale = self.scale or 1. 
/ sqrt(D)\n        if scale is not None:\n            scores_top = scores_top * scale\n        # get the context\n        context = self._get_initial_context(values, L_Q)\n        # update the context with selected top_k queries\n        context, attn = self._update_context(context, values, scores_top, index, L_Q, attn_mask)\n\n        return context.contiguous(), attn\n\n\nclass AttentionLayer(nn.Module):\n    def __init__(self, attention, d_model, n_heads, d_keys=None,\n                 d_values=None):\n        super(AttentionLayer, self).__init__()\n\n        d_keys = d_keys or (d_model // n_heads)\n        d_values = d_values or (d_model // n_heads)\n\n        self.inner_attention = attention\n        self.query_projection = nn.Linear(d_model, d_keys * n_heads)\n        self.key_projection = nn.Linear(d_model, d_keys * n_heads)\n        self.value_projection = nn.Linear(d_model, d_values * n_heads)\n        self.out_projection = nn.Linear(d_values * n_heads, d_model)\n        self.n_heads = n_heads\n\n    def forward(self, queries, keys, values, attn_mask):\n        B, L, _ = queries.shape\n        _, S, _ = keys.shape\n        H = self.n_heads\n\n        queries = self.query_projection(queries).view(B, L, H, -1)\n        keys = self.key_projection(keys).view(B, S, H, -1)\n        values = self.value_projection(values).view(B, S, H, -1)\n\n        out, attn = self.inner_attention(\n            queries,\n            keys,\n            values,\n            attn_mask\n        )\n        out = out.view(B, L, -1)\n\n        return self.out_projection(out), attn\n\n\nclass ReformerLayer(nn.Module):\n    def __init__(self, attention, d_model, n_heads, d_keys=None,\n                 d_values=None, causal=False, bucket_size=4, n_hashes=4):\n        super().__init__()\n        self.bucket_size = bucket_size\n        self.attn = LSHSelfAttention(\n            dim=d_model,\n            heads=n_heads,\n            bucket_size=bucket_size,\n            n_hashes=n_hashes,\n   
         causal=causal\n        )\n\n    def fit_length(self, queries):\n        # inside reformer: assert N % (bucket_size * 2) == 0\n        B, N, C = queries.shape\n        if N % (self.bucket_size * 2) == 0:\n            return queries\n        else:\n            # fill the time series\n            fill_len = (self.bucket_size * 2) - (N % (self.bucket_size * 2))\n            return torch.cat([queries, torch.zeros([B, fill_len, C]).to(queries.device)], dim=1)\n\n    def forward(self, queries, keys, values, attn_mask):\n        # in Reformer: defalut queries=keys\n        B, N, C = queries.shape\n        queries = self.attn(self.fit_length(queries))[:, :N, :]\n        return queries, None\n"
  },
  {
    "path": "ts_forecasting_methods/SupervisedBaselines/layers/Transformer_EncDec.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass ConvLayer(nn.Module):\n    def __init__(self, c_in):\n        super(ConvLayer, self).__init__()\n        self.downConv = nn.Conv1d(in_channels=c_in,\n                                  out_channels=c_in,\n                                  kernel_size=3,\n                                  padding=2,\n                                  padding_mode='circular')\n        self.norm = nn.BatchNorm1d(c_in)\n        self.activation = nn.ELU()\n        self.maxPool = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)\n\n    def forward(self, x):\n        x = self.downConv(x.permute(0, 2, 1))\n        x = self.norm(x)\n        x = self.activation(x)\n        x = self.maxPool(x)\n        x = x.transpose(1, 2)\n        return x\n\n\nclass EncoderLayer(nn.Module):\n    def __init__(self, attention, d_model, d_ff=None, dropout=0.1, activation=\"relu\"):\n        super(EncoderLayer, self).__init__()\n        d_ff = d_ff or 4 * d_model\n        self.attention = attention\n        self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1)\n        self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1)\n        self.norm1 = nn.LayerNorm(d_model)\n        self.norm2 = nn.LayerNorm(d_model)\n        self.dropout = nn.Dropout(dropout)\n        self.activation = F.relu if activation == \"relu\" else F.gelu\n\n    def forward(self, x, attn_mask=None):\n        new_x, attn = self.attention(\n            x, x, x,\n            attn_mask=attn_mask\n        )\n        x = x + self.dropout(new_x)\n\n        y = x = self.norm1(x)\n        y = self.dropout(self.activation(self.conv1(y.transpose(-1, 1))))\n        y = self.dropout(self.conv2(y).transpose(-1, 1))\n\n        return self.norm2(x + y), attn\n\n\nclass Encoder(nn.Module):\n    def __init__(self, attn_layers, conv_layers=None, norm_layer=None):\n        super(Encoder, self).__init__()\n        
self.attn_layers = nn.ModuleList(attn_layers)\n        self.conv_layers = nn.ModuleList(conv_layers) if conv_layers is not None else None\n        self.norm = norm_layer\n\n    def forward(self, x, attn_mask=None):\n        # x [B, L, D]\n        attns = []\n        if self.conv_layers is not None:\n            for attn_layer, conv_layer in zip(self.attn_layers, self.conv_layers):\n                x, attn = attn_layer(x, attn_mask=attn_mask)\n                x = conv_layer(x)\n                attns.append(attn)\n            x, attn = self.attn_layers[-1](x)\n            attns.append(attn)\n        else:\n            for attn_layer in self.attn_layers:\n                x, attn = attn_layer(x, attn_mask=attn_mask)\n                attns.append(attn)\n\n        if self.norm is not None:\n            x = self.norm(x)\n\n        return x, attns\n\n\nclass DecoderLayer(nn.Module):\n    def __init__(self, self_attention, cross_attention, d_model, d_ff=None,\n                 dropout=0.1, activation=\"relu\"):\n        super(DecoderLayer, self).__init__()\n        d_ff = d_ff or 4 * d_model\n        self.self_attention = self_attention\n        self.cross_attention = cross_attention\n        self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1)\n        self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1)\n        self.norm1 = nn.LayerNorm(d_model)\n        self.norm2 = nn.LayerNorm(d_model)\n        self.norm3 = nn.LayerNorm(d_model)\n        self.dropout = nn.Dropout(dropout)\n        self.activation = F.relu if activation == \"relu\" else F.gelu\n\n    def forward(self, x, cross, x_mask=None, cross_mask=None):\n        x = x + self.dropout(self.self_attention(\n            x, x, x,\n            attn_mask=x_mask\n        )[0])\n        x = self.norm1(x)\n\n        x = x + self.dropout(self.cross_attention(\n            x, cross, cross,\n            attn_mask=cross_mask\n        )[0])\n\n        y = x = self.norm2(x)\n      
  y = self.dropout(self.activation(self.conv1(y.transpose(-1, 1))))\n        y = self.dropout(self.conv2(y).transpose(-1, 1))\n\n        return self.norm3(x + y)\n\n\nclass Decoder(nn.Module):\n    def __init__(self, layers, norm_layer=None, projection=None):\n        super(Decoder, self).__init__()\n        self.layers = nn.ModuleList(layers)\n        self.norm = norm_layer\n        self.projection = projection\n\n    def forward(self, x, cross, x_mask=None, cross_mask=None):\n        for layer in self.layers:\n            x = layer(x, cross, x_mask=x_mask, cross_mask=cross_mask)\n\n        if self.norm is not None:\n            x = self.norm(x)\n\n        if self.projection is not None:\n            x = self.projection(x)\n        return x\n"
  },
  {
    "path": "ts_forecasting_methods/SupervisedBaselines/layers/__init__.py",
    "content": ""
  },
  {
    "path": "ts_forecasting_methods/SupervisedBaselines/requirements.txt",
    "content": "pandas\nsklearn\ntorchvision\nnumpy\nmatplotlib\nreformer_pytorch"
  },
  {
    "path": "ts_forecasting_methods/SupervisedBaselines/run.py",
    "content": "import argparse\nimport os\nimport torch\nfrom exp.exp_main import Exp_Main\nimport random\nimport numpy as np\nimport time\nimport pandas as pd\nfix_seed = 2021\nrandom.seed(fix_seed)\ntorch.manual_seed(fix_seed)\nnp.random.seed(fix_seed)\n\nparser = argparse.ArgumentParser(description='Autoformer & Transformer family for Time Series Forecasting')\n\n# basic config\nparser.add_argument('--is_training', type=int, required=True, default=1, help='status')\nparser.add_argument('--model_id', type=str, required=True, default='test', help='model id')\nparser.add_argument('--model', type=str, required=True, default='Autoformer',\n                    help='model name, options: [Autoformer, Informer, Transformer]')\n\n# data loader\nparser.add_argument('--data', type=str, required=True, default='ETTm1', help='dataset type')\nparser.add_argument('--root_path', type=str, default='./data/ETT/', help='root path of the data file')\nparser.add_argument('--data_path', type=str, default='ETTh1.csv', help='data file')\nparser.add_argument('--features', type=str, default='M',\n                    help='forecasting task, options:[M, S, MS]; M:multivariate predict multivariate, S:univariate predict univariate, MS:multivariate predict univariate')\nparser.add_argument('--target', type=str, default='OT', help='target feature in S or MS task')\nparser.add_argument('--freq', type=str, default='h',\n                    help='freq for time features encoding, options:[s:secondly, t:minutely, h:hourly, d:daily, b:business days, w:weekly, m:monthly], you can also use more detailed freq like 15min or 3h')\nparser.add_argument('--checkpoints', type=str, default='./checkpoints/', help='location of model checkpoints')\n\n# forecasting task\nparser.add_argument('--seq_len', type=int, default=96, help='input sequence length')\nparser.add_argument('--label_len', type=int, default=48, help='start token length')\nparser.add_argument('--pred_len', type=int, default=96, help='prediction 
sequence length')\n\n# model define\nparser.add_argument('--bucket_size', type=int, default=4, help='for Reformer')\nparser.add_argument('--n_hashes', type=int, default=4, help='for Reformer')\nparser.add_argument('--enc_in', type=int, default=7, help='encoder input size')\nparser.add_argument('--dec_in', type=int, default=7, help='decoder input size')\nparser.add_argument('--c_out', type=int, default=7, help='output size')\nparser.add_argument('--d_model', type=int, default=512, help='dimension of model')\nparser.add_argument('--num_inputs', type=int, default=512, help='dimension of model')\nparser.add_argument('--num_channels', type=int, default=512, help='dimension of model')\nparser.add_argument('--n_heads', type=int, default=8, help='num of heads')\nparser.add_argument('--e_layers', type=int, default=2, help='num of encoder layers')\nparser.add_argument('--d_layers', type=int, default=1, help='num of decoder layers')\nparser.add_argument('--d_ff', type=int, default=2048, help='dimension of fcn')\nparser.add_argument('--moving_avg', type=int, default=25, help='window size of moving average')\nparser.add_argument('--factor', type=int, default=1, help='attn factor')\nparser.add_argument('--distil', action='store_false',\n                    help='whether to use distilling in encoder, using this argument means not using distilling',\n                    default=True)\nparser.add_argument('--dropout', type=float, default=0.05, help='dropout')\nparser.add_argument('--embed', type=str, default='timeF',\n                    help='time features encoding, options:[timeF, fixed, learned]')\nparser.add_argument('--activation', type=str, default='gelu', help='activation')\nparser.add_argument('--output_attention', action='store_true', help='whether to output attention in ecoder')\nparser.add_argument('--do_predict', action='store_true', help='whether to predict unseen future data')\n\n# optimization\nparser.add_argument('--num_workers', type=int, default=10, help='data 
loader num workers')\nparser.add_argument('--itr', type=int, default=2, help='experiments times')\nparser.add_argument('--train_epochs', type=int, default=10, help='train epochs')\nparser.add_argument('--batch_size', type=int, default=32, help='batch size of train input data')\nparser.add_argument('--patience', type=int, default=3, help='early stopping patience')\nparser.add_argument('--learning_rate', type=float, default=0.0001, help='optimizer learning rate')\nparser.add_argument('--des', type=str, default='test', help='exp description')\nparser.add_argument('--loss', type=str, default='mse', help='loss function')\nparser.add_argument('--lradj', type=str, default='type1', help='adjust learning rate')\nparser.add_argument('--use_amp', action='store_true', help='use automatic mixed precision training', default=False)\n\n# GPU\nparser.add_argument('--use_gpu', type=bool, default=True, help='use gpu')\nparser.add_argument('--gpu', type=int, default=0, help='gpu')\nparser.add_argument('--use_multi_gpu', action='store_true', help='use multiple gpus', default=False)\nparser.add_argument('--devices', type=str, default='0', help='device ids of multiple gpus')\n\nargs = parser.parse_args()\n\nargs.use_gpu = True if torch.cuda.is_available() and args.use_gpu else False\n\nif args.use_gpu and args.use_multi_gpu:\n    args.devices = args.devices.replace(' ', '')\n    device_ids = args.devices.split(',')\n    args.device_ids = [int(id_) for id_ in device_ids]\n    args.gpu = args.device_ids[0]\n\nprint('Args in experiment:')\nprint(args)\n\nExp = Exp_Main\n\nif args.is_training:\n    for ii in range(args.itr):\n        # setting record of experiments\n        setting = '{}_{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_fc{}_eb{}_dt{}_{}_{}'.format(\n            args.model_id,\n            args.model,\n            args.data,\n            args.features,\n            args.seq_len,\n            args.label_len,\n            args.pred_len,\n            args.d_model,\n            
args.n_heads,\n            args.e_layers,\n            args.d_layers,\n            args.d_ff,\n            args.factor,\n            args.embed,\n            args.distil,\n            args.des, ii)\n\n        exp = Exp(args)  # set experiments\n        print('>>>>>>>start training : {}>>>>>>>>>>>>>>>>>>>>>>>>>>'.format(setting))\n        t = time.time()\n        exp.train(setting)\n        train_time = time.time() - t\n\n        print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))\n        t = time.time()\n        exp.test(setting)\n        inference_time = time.time() - t\n\n        if args.do_predict:\n            print('>>>>>>>predicting : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))\n            exp.predict(setting, True)\n        \n        folder_path = './results/' + setting + '/'\n        if not os.path.exists(folder_path):\n            os.makedirs(folder_path)\n        np.save(folder_path + 'time.npy', np.array([train_time, inference_time]))\n        torch.cuda.empty_cache()\nelse:\n    ii = 0\n    setting = '{}_{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_fc{}_eb{}_dt{}_{}_{}'.format(args.model_id,\n                                                                                                  args.model,\n                                                                                                  args.data,\n                                                                                                  args.features,\n                                                                                                  args.seq_len,\n                                                                                                  args.label_len,\n                                                                                                  args.pred_len,\n                                                                                                  args.d_model,\n                                            
                                                      args.n_heads,\n                                                                                                  args.e_layers,\n                                                                                                  args.d_layers,\n                                                                                                  args.d_ff,\n                                                                                                  args.factor,\n                                                                                                  args.embed,\n                                                                                                  args.distil,\n                                                                                                  args.des, ii)\n\n    exp = Exp(args)  # set experiments\n    print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))\n    exp.test(setting, test=1)\n    torch.cuda.empty_cache()\n"
  },
  {
    "path": "ts_forecasting_methods/SupervisedBaselines/utils/__init__.py",
    "content": ""
  },
  {
    "path": "ts_forecasting_methods/SupervisedBaselines/utils/download_data.py",
    "content": "import requests\n\nif __name__==\"__main__\":\n    source_url = 'https://cloud.tsinghua.edu.cn/d/e1ccfff39ad541908bae/files/?p=%2Fall_six_datasets.zip&dl=1'\n    headers = {'User-Agent': 'Mozilla/5.0'}\n    res = requests.get(source_url, headers=headers)\n\n    with open('dataset/datasets.zip', 'wb') as f:\n        f.write(res.content)\n"
  },
  {
    "path": "ts_forecasting_methods/SupervisedBaselines/utils/masking.py",
    "content": "import torch\n\n\nclass TriangularCausalMask():\n    def __init__(self, B, L, device=\"cpu\"):\n        mask_shape = [B, 1, L, L]\n        with torch.no_grad():\n            self._mask = torch.triu(torch.ones(mask_shape, dtype=torch.bool), diagonal=1).to(device)\n\n    @property\n    def mask(self):\n        return self._mask\n\n\nclass ProbMask():\n    def __init__(self, B, H, L, index, scores, device=\"cpu\"):\n        _mask = torch.ones(L, scores.shape[-1], dtype=torch.bool).to(device).triu(1)\n        _mask_ex = _mask[None, None, :].expand(B, H, L, scores.shape[-1])\n        indicator = _mask_ex[torch.arange(B)[:, None, None],\n                    torch.arange(H)[None, :, None],\n                    index, :].to(device)\n        self._mask = indicator.view(scores.shape).to(device)\n\n    @property\n    def mask(self):\n        return self._mask\n"
  },
  {
    "path": "ts_forecasting_methods/SupervisedBaselines/utils/metrics.py",
    "content": "import numpy as np\n\n\ndef RSE(pred, true):\n    return np.sqrt(np.sum((true - pred) ** 2)) / np.sqrt(np.sum((true - true.mean()) ** 2))\n\n\ndef CORR(pred, true):\n    u = ((true - true.mean(0)) * (pred - pred.mean(0))).sum(0)\n    d = np.sqrt(((true - true.mean(0)) ** 2 * (pred - pred.mean(0)) ** 2).sum(0))\n    return (u / d).mean(-1)\n\n\ndef MAE(pred, true):\n    return np.mean(np.abs(pred - true))\n\n\ndef MSE(pred, true):\n    return np.mean((pred - true) ** 2)\n\n\ndef RMSE(pred, true):\n    return np.sqrt(MSE(pred, true))\n\n\ndef MAPE(pred, true):\n    return np.mean(np.abs((pred - true) / true))\n\n\ndef MSPE(pred, true):\n    return np.mean(np.square((pred - true) / true))\n\n\ndef metric(pred, true):\n    mae = MAE(pred, true)\n    mse = MSE(pred, true)\n    rmse = RMSE(pred, true)\n    mape = MAPE(pred, true)\n    mspe = MSPE(pred, true)\n\n    return mae, mse, rmse, mape, mspe\n"
  },
  {
    "path": "ts_forecasting_methods/SupervisedBaselines/utils/timefeatures.py",
    "content": "from typing import List\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.tseries import offsets\nfrom pandas.tseries.frequencies import to_offset\n\n\nclass TimeFeature:\n    def __init__(self):\n        pass\n\n    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n        pass\n\n    def __repr__(self):\n        return self.__class__.__name__ + \"()\"\n\n\nclass SecondOfMinute(TimeFeature):\n    \"\"\"Minute of hour encoded as value between [-0.5, 0.5]\"\"\"\n\n    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n        return index.second / 59.0 - 0.5\n\n\nclass MinuteOfHour(TimeFeature):\n    \"\"\"Minute of hour encoded as value between [-0.5, 0.5]\"\"\"\n\n    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n        return index.minute / 59.0 - 0.5\n\n\nclass HourOfDay(TimeFeature):\n    \"\"\"Hour of day encoded as value between [-0.5, 0.5]\"\"\"\n\n    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n        return index.hour / 23.0 - 0.5\n\n\nclass DayOfWeek(TimeFeature):\n    \"\"\"Hour of day encoded as value between [-0.5, 0.5]\"\"\"\n\n    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n        return index.dayofweek / 6.0 - 0.5\n\n\nclass DayOfMonth(TimeFeature):\n    \"\"\"Day of month encoded as value between [-0.5, 0.5]\"\"\"\n\n    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n        return (index.day - 1) / 30.0 - 0.5\n\n\nclass DayOfYear(TimeFeature):\n    \"\"\"Day of year encoded as value between [-0.5, 0.5]\"\"\"\n\n    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n        return (index.dayofyear - 1) / 365.0 - 0.5\n\n\nclass MonthOfYear(TimeFeature):\n    \"\"\"Month of year encoded as value between [-0.5, 0.5]\"\"\"\n\n    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n        return (index.month - 1) / 11.0 - 0.5\n\n\nclass WeekOfYear(TimeFeature):\n    \"\"\"Week of year encoded as value between [-0.5, 0.5]\"\"\"\n\n    
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n        return (index.isocalendar().week - 1) / 52.0 - 0.5\n\n\ndef time_features_from_frequency_str(freq_str: str) -> List[TimeFeature]:\n    \"\"\"\n    Returns a list of time features that will be appropriate for the given frequency string.\n    Parameters\n    ----------\n    freq_str\n        Frequency string of the form [multiple][granularity] such as \"12H\", \"5min\", \"1D\" etc.\n    \"\"\"\n\n    features_by_offsets = {\n        offsets.YearEnd: [],\n        offsets.QuarterEnd: [MonthOfYear],\n        offsets.MonthEnd: [MonthOfYear],\n        offsets.Week: [DayOfMonth, WeekOfYear],\n        offsets.Day: [DayOfWeek, DayOfMonth, DayOfYear],\n        offsets.BusinessDay: [DayOfWeek, DayOfMonth, DayOfYear],\n        offsets.Hour: [HourOfDay, DayOfWeek, DayOfMonth, DayOfYear],\n        offsets.Minute: [\n            MinuteOfHour,\n            HourOfDay,\n            DayOfWeek,\n            DayOfMonth,\n            DayOfYear,\n        ],\n        offsets.Second: [\n            SecondOfMinute,\n            MinuteOfHour,\n            HourOfDay,\n            DayOfWeek,\n            DayOfMonth,\n            DayOfYear,\n        ],\n    }\n\n    offset = to_offset(freq_str)\n\n    for offset_type, feature_classes in features_by_offsets.items():\n        if isinstance(offset, offset_type):\n            return [cls() for cls in feature_classes]\n\n    supported_freq_msg = f\"\"\"\n    Unsupported frequency {freq_str}\n    The following frequencies are supported:\n        Y   - yearly\n            alias: A\n        M   - monthly\n        W   - weekly\n        D   - daily\n        B   - business days\n        H   - hourly\n        T   - minutely\n            alias: min\n        S   - secondly\n    \"\"\"\n    raise RuntimeError(supported_freq_msg)\n\n\ndef time_features(dates, freq='h'):\n    return np.vstack([feat(dates) for feat in time_features_from_frequency_str(freq)])\n"
  },
  {
    "path": "ts_forecasting_methods/SupervisedBaselines/utils/tools.py",
    "content": "import numpy as np\nimport torch\nimport matplotlib.pyplot as plt\n\nplt.switch_backend('agg')\n\n\ndef adjust_learning_rate(optimizer, epoch, args):\n    # lr = args.learning_rate * (0.2 ** (epoch // 2))\n    if args.lradj == 'type1':\n        lr_adjust = {epoch: args.learning_rate * (0.5 ** ((epoch - 1) // 1))}\n    elif args.lradj == 'type2':\n        lr_adjust = {\n            2: 5e-5, 4: 1e-5, 6: 5e-6, 8: 1e-6,\n            10: 5e-7, 15: 1e-7, 20: 5e-8\n        }\n    if epoch in lr_adjust.keys():\n        lr = lr_adjust[epoch]\n        for param_group in optimizer.param_groups:\n            param_group['lr'] = lr\n        print('Updating learning rate to {}'.format(lr))\n\n\nclass EarlyStopping:\n    def __init__(self, patience=7, verbose=False, delta=0):\n        self.patience = patience\n        self.verbose = verbose\n        self.counter = 0\n        self.best_score = None\n        self.early_stop = False\n        self.val_loss_min = np.Inf\n        self.delta = delta\n\n    def __call__(self, val_loss, model, path):\n        score = -val_loss\n        if self.best_score is None:\n            self.best_score = score\n            self.save_checkpoint(val_loss, model, path)\n        elif score < self.best_score + self.delta:\n            self.counter += 1\n            print(f'EarlyStopping counter: {self.counter} out of {self.patience}')\n            if self.counter >= self.patience:\n                self.early_stop = True\n        else:\n            self.best_score = score\n            self.save_checkpoint(val_loss, model, path)\n            self.counter = 0\n\n    def save_checkpoint(self, val_loss, model, path):\n        if self.verbose:\n            print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}).  
Saving model ...')\n        torch.save(model.state_dict(), path + '/' + 'checkpoint.pth')\n        self.val_loss_min = val_loss\n\n\nclass dotdict(dict):\n    \"\"\"dot.notation access to dictionary attributes\"\"\"\n    __getattr__ = dict.get\n    __setattr__ = dict.__setitem__\n    __delattr__ = dict.__delitem__\n\n\nclass StandardScaler():\n    def __init__(self, mean, std):\n        self.mean = mean\n        self.std = std\n\n    def transform(self, data):\n        return (data - self.mean) / self.std\n\n    def inverse_transform(self, data):\n        return (data * self.std) + self.mean\n\n\ndef visual(true, preds=None, name='./pic/test.pdf'):\n    \"\"\"\n    Results visualization\n    \"\"\"\n    plt.figure()\n    plt.plot(true, label='GroundTruth', linewidth=2)\n    if preds is not None:\n        plt.plot(preds, label='Prediction', linewidth=2)\n    plt.legend()\n    plt.savefig(name, bbox_inches='tight')\n"
  },
  {
    "path": "ts_forecasting_methods/ts2vec/README.md",
    "content": "# TS2Vec\n\nThis repository contains the official implementation for the paper [TS2Vec: Towards Universal Representation of Time Series](https://arxiv.org/abs/2106.10466) (AAAI-22).\n\n\n## Data\n\nThe datasets can be obtained and put into `datasets/` folder in the following way:\n\n* [3 ETT datasets](https://github.com/zhouhaoyi/ETDataset) should be placed at `datasets/ETTh1.csv`, `datasets/ETTh2.csv` and `datasets/ETTm1.csv`.\n* [Electricity dataset](https://archive.ics.uci.edu/ml/datasets/ElectricityLoadDiagrams20112014) should be preprocessed using `datasets/preprocess_electricity.py` and placed at `datasets/electricity.csv`.\n\n\n## Usage\n\nTo train and evaluate TS2Vec on a dataset, run the following command:\n\n```train & evaluate\npython train.py <dataset_name> <run_name> --loader <loader> --batch-size <batch_size> --repr-dims <repr_dims> --gpu <gpu> --eval\n```\nThe detailed descriptions about the arguments are as following:\n| Parameter name | Description of parameter |\n| --- | --- |\n| dataset_name | The dataset name |\n| run_name | The folder name used to save model, output and evaluation metrics. This can be set to any word |\n| loader | The data loader used to load the experimental data. This can be set to `UCR`, `UEA`, `forecast_csv`, `forecast_csv_univar`, `anomaly`, or `anomaly_coldstart` |\n| batch_size | The batch size (defaults to 8) |\n| repr_dims | The representation dimensions (defaults to 320) |\n| gpu | The gpu no. used for training and inference (defaults to 0) |\n| eval | Whether to perform evaluation after training |\n\n(For descriptions of more arguments, run `python train.py -h`.)\n\nAfter training and evaluation, the trained encoder, output and evaluation metrics can be found in `training/DatasetName__RunName_Date_Time/`. \n\n**Scripts:** The scripts for reproduction are provided in `scripts/` folder.\n\n"
  },
  {
    "path": "ts_forecasting_methods/ts2vec/__init__.py",
    "content": ""
  },
  {
    "path": "ts_forecasting_methods/ts2vec/data_provider/__init__.py",
    "content": "\n"
  },
  {
    "path": "ts_forecasting_methods/ts2vec/data_provider/data_factory.py",
    "content": "# from .data_loader import Dataset_ETT_hour, Dataset_ETT_minute, Dataset_Custom, Dataset_M4, PSMSegLoader, \\\n#     MSLSegLoader, SMAPSegLoader, SMDSegLoader, SWATSegLoader, UEAloader\n# from .uea import collate_fn\n# from torch.utils.data import DataLoader\n#\n# data_dict = {\n#     'ETTh1': Dataset_ETT_hour,\n#     'ETTh2': Dataset_ETT_hour,\n#     'ETTm1': Dataset_ETT_minute,\n#     'ETTm2': Dataset_ETT_minute,\n#     'custom': Dataset_Custom,\n#     'm4': Dataset_M4,\n#     'PSM': PSMSegLoader,\n#     'MSL': MSLSegLoader,\n#     'SMAP': SMAPSegLoader,\n#     'SMD': SMDSegLoader,\n#     'SWAT': SWATSegLoader,\n#     'UEA': UEAloader\n# }\n#\n#\n# def data_provider(args, flag):\n#     Data = data_dict[args.data]\n#     timeenc = 0 if args.embed != 'timeF' else 1\n#\n#     if flag == 'test':\n#         shuffle_flag = False\n#         drop_last = True\n#         if args.task_name == 'anomaly_detection' or args.task_name == 'classification':\n#             batch_size = args.batch_size\n#         else:\n#             batch_size = 1  # bsz=1 for evaluation\n#         freq = args.freq\n#     else:\n#         shuffle_flag = True\n#         drop_last = True\n#         batch_size = args.batch_size  # bsz for train and valid\n#         freq = args.freq\n#\n#     if args.task_name == 'anomaly_detection':\n#         drop_last = False\n#         data_set = Data(\n#             root_path=args.root_path,\n#             win_size=args.seq_len,\n#             flag=flag,\n#         )\n#         print(flag, len(data_set))\n#         data_loader = DataLoader(\n#             data_set,\n#             batch_size=batch_size,\n#             shuffle=shuffle_flag,\n#             num_workers=args.num_workers,\n#             drop_last=drop_last)\n#         return data_set, data_loader\n#     elif args.task_name == 'classification':\n#         drop_last = False\n#         data_set = Data(\n#             root_path=args.root_path,\n#             flag=flag,\n#         )\n#\n#     
    data_loader = DataLoader(\n#             data_set,\n#             batch_size=batch_size,\n#             shuffle=shuffle_flag,\n#             num_workers=args.num_workers,\n#             drop_last=drop_last,\n#             collate_fn=lambda x: collate_fn(x, max_len=args.seq_len)\n#         )\n#         return data_set, data_loader\n#     else:\n#         if args.data == 'm4':\n#             drop_last = False\n#         data_set = Data(\n#             root_path=args.root_path,\n#             data_path=args.data_path,\n#             flag=flag,\n#             size=[args.seq_len, args.label_len, args.pred_len],\n#             features=args.features,\n#             target=args.target,\n#             timeenc=timeenc,\n#             freq=freq,\n#             seasonal_patterns=args.seasonal_patterns\n#         )\n#         print(flag, len(data_set))\n#         data_loader = DataLoader(\n#             data_set,\n#             batch_size=batch_size,\n#             shuffle=shuffle_flag,\n#             num_workers=args.num_workers,\n#             drop_last=drop_last)\n#         return data_set, data_loader\n\nfrom .data_loader import Dataset_Custom, Dataset_Pred, Dataset_TSF, Dataset_ETT_hour, Dataset_ETT_minute\nfrom torch.utils.data import DataLoader\n\ndata_dict = {\n    'custom': Dataset_Custom,\n    'tsf_data': Dataset_TSF,\n    'ett_h': Dataset_ETT_hour,\n    'ett_m': Dataset_ETT_minute,\n}\n\n\ndef data_provider(args, flag, drop_last_test=True, train_all=False):\n    Data = data_dict[args.data]\n    timeenc = 0 if args.embed != 'timeF' else 1\n    percent = args.percent\n    max_len = args.max_len\n\n    if flag == 'test':\n        shuffle_flag = False\n        drop_last = drop_last_test\n        batch_size = args.batch_size\n        freq = args.freq\n    elif flag == 'pred':\n        shuffle_flag = False\n        drop_last = False\n        batch_size = 1\n        freq = args.freq\n        Data = Dataset_Pred\n    elif flag == 'val':\n        shuffle_flag = True\n    
    drop_last = drop_last_test\n        batch_size = args.batch_size\n        freq = args.freq\n    else:\n        shuffle_flag = True\n        drop_last = True\n        batch_size = args.batch_size\n        freq = args.freq\n\n    data_set = Data(\n        root_path=args.root_path,\n        data_path=args.data_path,\n        flag=flag,\n        size=[args.seq_len, args.label_len, args.pred_len],\n        features=args.features,\n        target=args.target,\n        timeenc=timeenc,\n        freq=freq,\n        percent=percent,\n        max_len=max_len,\n        train_all=train_all\n    )\n    print(flag, len(data_set))\n    data_loader = DataLoader(\n        data_set,\n        batch_size=batch_size,\n        shuffle=shuffle_flag,\n        num_workers=args.num_workers,\n        drop_last=drop_last)\n    return data_set, data_loader\n"
  },
  {
    "path": "ts_forecasting_methods/ts2vec/data_provider/data_loader.py",
    "content": "import os\nimport numpy as np\nimport pandas as pd\nimport glob\nimport re\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nfrom sklearn.preprocessing import StandardScaler\n\nfrom sktime.datasets import load_from_tsfile_to_dataframe\nimport warnings\nfrom typing import List\nfrom pandas.tseries import offsets\nfrom pandas.tseries.frequencies import to_offset\nfrom .tools import convert_tsf_to_dataframe\nfrom typing import List\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.tseries import offsets\nfrom pandas.tseries.frequencies import to_offset\n\n\nwarnings.filterwarnings('ignore')\n\n\nclass TimeFeature:\n    def __init__(self):\n        pass\n\n    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n        pass\n\n    def __repr__(self):\n        return self.__class__.__name__ + \"()\"\n\n\nclass SecondOfMinute(TimeFeature):\n    \"\"\"Minute of hour encoded as value between [-0.5, 0.5]\"\"\"\n\n    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n        return index.second / 59.0 - 0.5\n\n\nclass MinuteOfHour(TimeFeature):\n    \"\"\"Minute of hour encoded as value between [-0.5, 0.5]\"\"\"\n\n    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n        return index.minute / 59.0 - 0.5\n\n\nclass HourOfDay(TimeFeature):\n    \"\"\"Hour of day encoded as value between [-0.5, 0.5]\"\"\"\n\n    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n        return index.hour / 23.0 - 0.5\n\n\nclass DayOfWeek(TimeFeature):\n    \"\"\"Hour of day encoded as value between [-0.5, 0.5]\"\"\"\n\n    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n        return index.dayofweek / 6.0 - 0.5\n\n\nclass DayOfMonth(TimeFeature):\n    \"\"\"Day of month encoded as value between [-0.5, 0.5]\"\"\"\n\n    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n        return (index.day - 1) / 30.0 - 0.5\n\n\nclass DayOfYear(TimeFeature):\n    \"\"\"Day of year encoded as value between 
[-0.5, 0.5]\"\"\"\n\n    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n        return (index.dayofyear - 1) / 365.0 - 0.5\n\n\nclass MonthOfYear(TimeFeature):\n    \"\"\"Month of year encoded as value between [-0.5, 0.5]\"\"\"\n\n    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n        return (index.month - 1) / 11.0 - 0.5\n\n\nclass WeekOfYear(TimeFeature):\n    \"\"\"Week of year encoded as value between [-0.5, 0.5]\"\"\"\n\n    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n        return (index.isocalendar().week - 1) / 52.0 - 0.5\n\n\ndef time_features_from_frequency_str(freq_str: str) -> List[TimeFeature]:\n    \"\"\"\n    Returns a list of time features that will be appropriate for the given frequency string.\n    Parameters\n    ----------\n    freq_str\n        Frequency string of the form [multiple][granularity] such as \"12H\", \"5min\", \"1D\" etc.\n    \"\"\"\n\n    features_by_offsets = {\n        offsets.YearEnd: [],\n        offsets.QuarterEnd: [MonthOfYear],\n        offsets.MonthEnd: [MonthOfYear],\n        offsets.Week: [DayOfMonth, WeekOfYear],\n        offsets.Day: [DayOfWeek, DayOfMonth, DayOfYear],\n        offsets.BusinessDay: [DayOfWeek, DayOfMonth, DayOfYear],\n        offsets.Hour: [HourOfDay, DayOfWeek, DayOfMonth, DayOfYear],\n        offsets.Minute: [\n            MinuteOfHour,\n            HourOfDay,\n            DayOfWeek,\n            DayOfMonth,\n            DayOfYear,\n        ],\n        offsets.Second: [\n            SecondOfMinute,\n            MinuteOfHour,\n            HourOfDay,\n            DayOfWeek,\n            DayOfMonth,\n            DayOfYear,\n        ],\n    }\n\n    print(\"freq_str = \", freq_str)\n    offset = to_offset(freq_str)\n\n    for offset_type, feature_classes in features_by_offsets.items():\n        if isinstance(offset, offset_type):\n            return [cls() for cls in feature_classes]\n\n    supported_freq_msg = f\"\"\"\n    Unsupported frequency 
{freq_str}\n    The following frequencies are supported:\n        Y   - yearly\n            alias: A\n        M   - monthly\n        W   - weekly\n        D   - daily\n        B   - business days\n        H   - hourly\n        T   - minutely\n            alias: min\n        S   - secondly\n    \"\"\"\n    raise RuntimeError(supported_freq_msg)\n\n\ndef time_features(dates, freq='h'):\n    return np.vstack([feat(dates) for feat in time_features_from_frequency_str(freq)])\n\n\n\nclass Dataset_ETT_hour(Dataset):\n    def __init__(self, root_path, flag='train', size=None,\n                 features='S', data_path='ETTh1.csv',\n                 target='OT', scale=True, timeenc=0, freq='h',\n                 percent=100, max_len=-1, train_all=False):\n        # size [seq_len, label_len, pred_len]\n        # info\n        if size == None:\n            self.seq_len = 24 * 4 * 4\n            self.label_len = 24 * 4\n            self.pred_len = 24 * 4\n        else:\n            self.seq_len = size[0]\n            self.label_len = size[1]\n            self.pred_len = size[2]\n        # init\n        assert flag in ['train', 'test', 'val']\n        type_map = {'train': 0, 'val': 1, 'test': 2}\n        self.set_type = type_map[flag]\n\n        self.percent = percent\n        self.features = features\n        self.target = target\n        self.scale = scale\n        self.timeenc = timeenc\n        self.freq = freq\n\n        self.root_path = root_path\n        self.data_path = data_path\n        self.__read_data__()\n\n        self.enc_in = self.data_x.shape[-1]\n        print(\"self.enc_in = {}\".format(self.enc_in))\n        print(\"self.data_x = {}\".format(self.data_x.shape))\n        self.tot_len = len(self.data_x) - self.seq_len - self.pred_len + 1\n\n    def __read_data__(self):\n        self.scaler = StandardScaler()\n        df_raw = pd.read_csv(os.path.join(self.root_path,\n                                          self.data_path))\n\n        border1s = [0, 12 * 30 * 
24 - self.seq_len, 12 * 30 * 24 + 4 * 30 * 24 - self.seq_len]\n        border2s = [12 * 30 * 24, 12 * 30 * 24 + 4 * 30 * 24, 12 * 30 * 24 + 8 * 30 * 24]\n        border1 = border1s[self.set_type]\n        border2 = border2s[self.set_type]\n\n        if self.set_type == 0:\n            border2 = (border2 - self.seq_len) * self.percent // 100 + self.seq_len\n\n        if self.features == 'M' or self.features == 'MS':\n            cols_data = df_raw.columns[1:]\n            df_data = df_raw[cols_data]\n        elif self.features == 'S':\n            df_data = df_raw[[self.target]]\n\n        if self.scale:\n            train_data = df_data[border1s[0]:border2s[0]]\n            self.scaler.fit(train_data.values)\n            data = self.scaler.transform(df_data.values)\n        else:\n            data = df_data.values\n\n        df_stamp = df_raw[['date']][border1:border2]\n        df_stamp['date'] = pd.to_datetime(df_stamp.date)\n        if self.timeenc == 0:\n            df_stamp['month'] = df_stamp.date.apply(lambda row: row.month, 1)\n            df_stamp['day'] = df_stamp.date.apply(lambda row: row.day, 1)\n            df_stamp['weekday'] = df_stamp.date.apply(lambda row: row.weekday(), 1)\n            df_stamp['hour'] = df_stamp.date.apply(lambda row: row.hour, 1)\n            data_stamp = df_stamp.drop(['date'], 1).values\n        elif self.timeenc == 1:\n            data_stamp = time_features(pd.to_datetime(df_stamp['date'].values), freq=self.freq)\n            data_stamp = data_stamp.transpose(1, 0)\n\n        self.data_x = data[border1:border2]\n        self.data_y = data[border1:border2]\n        self.data_stamp = data_stamp\n\n    def __getitem__(self, index):\n        feat_id = index // self.tot_len\n        s_begin = index % self.tot_len\n\n        s_end = s_begin + self.seq_len\n        r_begin = s_end - self.label_len\n        r_end = r_begin + self.label_len + self.pred_len\n        seq_x = self.data_x[s_begin:s_end, feat_id:feat_id + 1]\n        seq_y 
= self.data_y[r_begin:r_end, feat_id:feat_id + 1]\n        seq_x_mark = self.data_stamp[s_begin:s_end]\n        seq_y_mark = self.data_stamp[r_begin:r_end]\n\n        return seq_x, seq_y, seq_x_mark, seq_y_mark\n\n    def __len__(self):\n        return (len(self.data_x) - self.seq_len - self.pred_len + 1) * self.enc_in\n\n    def inverse_transform(self, data):\n        return self.scaler.inverse_transform(data)\n\n\nclass Dataset_ETT_minute(Dataset):\n    def __init__(self, root_path, flag='train', size=None,\n                 features='S', data_path='ETTm1.csv',\n                 target='OT', scale=True, timeenc=0, freq='t',\n                 percent=100, max_len=-1, train_all=False):\n        # size [seq_len, label_len, pred_len]\n        # info\n        if size == None:\n            self.seq_len = 24 * 4 * 4\n            self.label_len = 24 * 4\n            self.pred_len = 24 * 4\n        else:\n            self.seq_len = size[0]\n            self.label_len = size[1]\n            self.pred_len = size[2]\n        # init\n        assert flag in ['train', 'test', 'val']\n        type_map = {'train': 0, 'val': 1, 'test': 2}\n        self.set_type = type_map[flag]\n\n        self.features = features\n        self.target = target\n        self.scale = scale\n        self.timeenc = timeenc\n        self.freq = freq\n        self.percent = percent\n\n        self.root_path = root_path\n        self.data_path = data_path\n        self.__read_data__()\n\n        self.enc_in = self.data_x.shape[-1]\n        self.tot_len = len(self.data_x) - self.seq_len - self.pred_len + 1\n\n    def __read_data__(self):\n        self.scaler = StandardScaler()\n        df_raw = pd.read_csv(os.path.join(self.root_path,\n                                          self.data_path))\n\n        border1s = [0, 12 * 30 * 24 * 4 - self.seq_len, 12 * 30 * 24 * 4 + 4 * 30 * 24 * 4 - self.seq_len]\n        border2s = [12 * 30 * 24 * 4, 12 * 30 * 24 * 4 + 4 * 30 * 24 * 4, 12 * 30 * 24 * 4 + 8 * 30 * 24 
* 4]\n        border1 = border1s[self.set_type]\n        border2 = border2s[self.set_type]\n        if self.set_type == 0:\n            border2 = (border2 - self.seq_len) * self.percent // 100 + self.seq_len\n\n        if self.features == 'M' or self.features == 'MS':\n            cols_data = df_raw.columns[1:]\n            df_data = df_raw[cols_data]\n        elif self.features == 'S':\n            df_data = df_raw[[self.target]]\n\n        if self.scale:\n            train_data = df_data[border1s[0]:border2s[0]]\n            self.scaler.fit(train_data.values)\n            data = self.scaler.transform(df_data.values)\n        else:\n            data = df_data.values\n\n        df_stamp = df_raw[['date']][border1:border2]\n        df_stamp['date'] = pd.to_datetime(df_stamp.date)\n        if self.timeenc == 0:\n            df_stamp['month'] = df_stamp.date.apply(lambda row: row.month, 1)\n            df_stamp['day'] = df_stamp.date.apply(lambda row: row.day, 1)\n            df_stamp['weekday'] = df_stamp.date.apply(lambda row: row.weekday(), 1)\n            df_stamp['hour'] = df_stamp.date.apply(lambda row: row.hour, 1)\n            df_stamp['minute'] = df_stamp.date.apply(lambda row: row.minute, 1)\n            df_stamp['minute'] = df_stamp.minute.map(lambda x: x // 15)\n            data_stamp = df_stamp.drop(['date'], 1).values\n        elif self.timeenc == 1:\n            data_stamp = time_features(pd.to_datetime(df_stamp['date'].values), freq=self.freq)\n            data_stamp = data_stamp.transpose(1, 0)\n\n        self.data_x = data[border1:border2]\n        self.data_y = data[border1:border2]\n        self.data_stamp = data_stamp\n\n    def __getitem__(self, index):\n        feat_id = index // self.tot_len\n        s_begin = index % self.tot_len\n\n        s_end = s_begin + self.seq_len\n        r_begin = s_end - self.label_len\n        r_end = r_begin + self.label_len + self.pred_len\n        seq_x = self.data_x[s_begin:s_end, feat_id:feat_id + 1]\n        
seq_y = self.data_y[r_begin:r_end, feat_id:feat_id + 1]\n        seq_x_mark = self.data_stamp[s_begin:s_end]\n        seq_y_mark = self.data_stamp[r_begin:r_end]\n\n        return seq_x, seq_y, seq_x_mark, seq_y_mark\n\n    def __len__(self):\n        return (len(self.data_x) - self.seq_len - self.pred_len + 1) * self.enc_in\n\n    def inverse_transform(self, data):\n        return self.scaler.inverse_transform(data)\n\n\nclass Dataset_Custom(Dataset):\n    def __init__(self, root_path, flag='train', size=None,\n                 features='S', data_path='ETTh1.csv',\n                 target='OT', scale=True, timeenc=0, freq='h',\n                 percent=10, max_len=-1, train_all=False):\n        # size [seq_len, label_len, pred_len]\n        # info\n        if size == None:\n            self.seq_len = 24 * 4 * 4\n            self.label_len = 24 * 4\n            self.pred_len = 24 * 4\n        else:\n            self.seq_len = size[0]\n            self.label_len = size[1]\n            self.pred_len = size[2]\n        # init\n        assert flag in ['train', 'test', 'val']\n        type_map = {'train': 0, 'val': 1, 'test': 2}\n        self.set_type = type_map[flag]\n\n        self.features = features\n        self.target = target\n        self.scale = scale\n        self.timeenc = timeenc\n        self.freq = freq\n        self.percent = percent\n\n        self.root_path = root_path\n        self.data_path = data_path\n        self.__read_data__()\n\n        self.enc_in = self.data_x.shape[-1]\n        self.tot_len = len(self.data_x) - self.seq_len - self.pred_len + 1\n\n    def __read_data__(self):\n        self.scaler = StandardScaler()\n        df_raw = pd.read_csv(os.path.join(self.root_path,\n                                          self.data_path))\n\n        '''\n        df_raw.columns: ['date', ...(other features), target feature]\n        '''\n        cols = list(df_raw.columns)\n        cols.remove(self.target)\n        cols.remove('date')\n        df_raw 
= df_raw[['date'] + cols + [self.target]]\n        # print(cols)\n        num_train = int(len(df_raw) * 0.7)\n        num_test = int(len(df_raw) * 0.2)\n        num_vali = len(df_raw) - num_train - num_test\n        border1s = [0, num_train - self.seq_len, len(df_raw) - num_test - self.seq_len]\n        border2s = [num_train, num_train + num_vali, len(df_raw)]\n        border1 = border1s[self.set_type]\n        border2 = border2s[self.set_type]\n\n        if self.set_type == 0:\n            border2 = (border2 - self.seq_len) * self.percent // 100 + self.seq_len\n\n        if self.features == 'M' or self.features == 'MS':\n            cols_data = df_raw.columns[1:]\n            df_data = df_raw[cols_data]\n        elif self.features == 'S':\n            df_data = df_raw[[self.target]]\n\n        if self.scale:\n            train_data = df_data[border1s[0]:border2s[0]]\n            self.scaler.fit(train_data.values)\n            data = self.scaler.transform(df_data.values)\n        else:\n            data = df_data.values\n\n        df_stamp = df_raw[['date']][border1:border2]\n        df_stamp['date'] = pd.to_datetime(df_stamp.date)\n        if self.timeenc == 0:\n            df_stamp['month'] = df_stamp.date.apply(lambda row: row.month, 1)\n            df_stamp['day'] = df_stamp.date.apply(lambda row: row.day, 1)\n            df_stamp['weekday'] = df_stamp.date.apply(lambda row: row.weekday(), 1)\n            df_stamp['hour'] = df_stamp.date.apply(lambda row: row.hour, 1)\n            data_stamp = df_stamp.drop(['date'], 1).values\n        elif self.timeenc == 1:\n            data_stamp = time_features(pd.to_datetime(df_stamp['date'].values), freq=self.freq)\n            data_stamp = data_stamp.transpose(1, 0)\n\n        self.data_x = data[border1:border2]\n        self.data_y = data[border1:border2]\n        self.data_stamp = data_stamp\n\n    def __getitem__(self, index):\n        feat_id = index // self.tot_len\n        s_begin = index % self.tot_len\n\n        
s_end = s_begin + self.seq_len\n        r_begin = s_end - self.label_len\n        r_end = r_begin + self.label_len + self.pred_len\n        seq_x = self.data_x[s_begin:s_end, feat_id:feat_id + 1]\n        seq_y = self.data_y[r_begin:r_end, feat_id:feat_id + 1]\n        seq_x_mark = self.data_stamp[s_begin:s_end]\n        seq_y_mark = self.data_stamp[r_begin:r_end]\n\n        return seq_x, seq_y, seq_x_mark, seq_y_mark\n\n    def __len__(self):\n        return (len(self.data_x) - self.seq_len - self.pred_len + 1) * self.enc_in\n\n    def inverse_transform(self, data):\n        return self.scaler.inverse_transform(data)\n\n\nclass Dataset_Pred(Dataset):\n    def __init__(self, root_path, flag='pred', size=None,\n                 features='S', data_path='ETTh1.csv',\n                 target='OT', scale=True, inverse=False, timeenc=0, freq='15min', cols=None,\n                 percent=None, train_all=False):\n        # size [seq_len, label_len, pred_len]\n        # info\n        if size == None:\n            self.seq_len = 24 * 4 * 4\n            self.label_len = 24 * 4\n            self.pred_len = 24 * 4\n        else:\n            self.seq_len = size[0]\n            self.label_len = size[1]\n            self.pred_len = size[2]\n        # init\n        assert flag in ['pred']\n\n        self.features = features\n        self.target = target\n        self.scale = scale\n        self.inverse = inverse\n        self.timeenc = timeenc\n        self.freq = freq\n        self.cols = cols\n        self.root_path = root_path\n        self.data_path = data_path\n        self.__read_data__()\n\n    def __read_data__(self):\n        self.scaler = StandardScaler()\n        df_raw = pd.read_csv(os.path.join(self.root_path,\n                                          self.data_path))\n        '''\n        df_raw.columns: ['date', ...(other features), target feature]\n        '''\n        if self.cols:\n            cols = self.cols.copy()\n            cols.remove(self.target)\n       
 else:\n            cols = list(df_raw.columns)\n            cols.remove(self.target)\n            cols.remove('date')\n        df_raw = df_raw[['date'] + cols + [self.target]]\n        border1 = len(df_raw) - self.seq_len\n        border2 = len(df_raw)\n\n        if self.features == 'M' or self.features == 'MS':\n            cols_data = df_raw.columns[1:]\n            df_data = df_raw[cols_data]\n        elif self.features == 'S':\n            df_data = df_raw[[self.target]]\n\n        if self.scale:\n            self.scaler.fit(df_data.values)\n            data = self.scaler.transform(df_data.values)\n        else:\n            data = df_data.values\n\n        tmp_stamp = df_raw[['date']][border1:border2]\n        tmp_stamp['date'] = pd.to_datetime(tmp_stamp.date)\n        pred_dates = pd.date_range(tmp_stamp.date.values[-1], periods=self.pred_len + 1, freq=self.freq)\n\n        df_stamp = pd.DataFrame(columns=['date'])\n        df_stamp.date = list(tmp_stamp.date.values) + list(pred_dates[1:])\n        if self.timeenc == 0:\n            df_stamp['month'] = df_stamp.date.apply(lambda row: row.month, 1)\n            df_stamp['day'] = df_stamp.date.apply(lambda row: row.day, 1)\n            df_stamp['weekday'] = df_stamp.date.apply(lambda row: row.weekday(), 1)\n            df_stamp['hour'] = df_stamp.date.apply(lambda row: row.hour, 1)\n            df_stamp['minute'] = df_stamp.date.apply(lambda row: row.minute, 1)\n            df_stamp['minute'] = df_stamp.minute.map(lambda x: x // 15)\n            data_stamp = df_stamp.drop(['date'], 1).values\n        elif self.timeenc == 1:\n            data_stamp = time_features(pd.to_datetime(df_stamp['date'].values), freq=self.freq)\n            data_stamp = data_stamp.transpose(1, 0)\n\n        self.data_x = data[border1:border2]\n        if self.inverse:\n            self.data_y = df_data.values[border1:border2]\n        else:\n            self.data_y = data[border1:border2]\n        self.data_stamp = data_stamp\n\n    
def __getitem__(self, index):\n        s_begin = index\n        s_end = s_begin + self.seq_len\n        r_begin = s_end - self.label_len\n        r_end = r_begin + self.label_len + self.pred_len\n\n        seq_x = self.data_x[s_begin:s_end]\n        if self.inverse:\n            seq_y = self.data_x[r_begin:r_begin + self.label_len]\n        else:\n            seq_y = self.data_y[r_begin:r_begin + self.label_len]\n        seq_x_mark = self.data_stamp[s_begin:s_end]\n        seq_y_mark = self.data_stamp[r_begin:r_end]\n\n        return seq_x, seq_y, seq_x_mark, seq_y_mark\n\n    def __len__(self):\n        return len(self.data_x) - self.seq_len + 1\n\n    def inverse_transform(self, data):\n        return self.scaler.inverse_transform(data)\n\n\nclass Dataset_TSF(Dataset):\n    def __init__(self, root_path, flag='train', size=None,\n                 features='S', data_path=None,\n                 target='OT', scale=True, timeenc=0, freq='Daily',\n                 percent=10, max_len=-1, train_all=False):\n\n        self.train_all = train_all\n\n        self.seq_len = size[0]\n        self.pred_len = size[2]\n        type_map = {'train': 0, 'val': 1, 'test': 2}\n        self.set_type = type_map[flag]\n\n        self.percent = percent\n        self.max_len = max_len\n        if self.max_len == -1:\n            self.max_len = 1e8\n\n        self.root_path = root_path\n        self.data_path = data_path\n        self.timeseries = self.__read_data__()\n\n    def __read_data__(self):\n        df, frequency, forecast_horizon, contain_missing_values, contain_equal_length = convert_tsf_to_dataframe(\n            os.path.join(self.root_path,\n                         self.data_path))\n        self.freq = frequency\n\n        def dropna(x):\n            return x[~np.isnan(x)]\n\n        timeseries = [dropna(ts).astype(np.float32) for ts in df.series_value]\n\n        self.tot_len = 0\n        self.len_seq = []\n        self.seq_id = []\n        for i in 
range(len(timeseries)):\n            res_len = max(self.pred_len + self.seq_len - timeseries[i].shape[0], 0)\n            pad_zeros = np.zeros(res_len)\n            timeseries[i] = np.hstack([pad_zeros, timeseries[i]])\n\n            _len = timeseries[i].shape[0]\n            train_len = _len - self.pred_len\n            if self.train_all:\n                border1s = [0, 0, train_len - self.seq_len]\n                border2s = [train_len, train_len, _len]\n            else:\n                border1s = [0, train_len - self.seq_len - self.pred_len, train_len - self.seq_len]\n                border2s = [train_len - self.pred_len, train_len, _len]\n            border2s[0] = (border2s[0] - self.seq_len) * self.percent // 100 + self.seq_len\n            # print(\"_len = {}\".format(_len))\n\n            curr_len = border2s[self.set_type] - max(border1s[self.set_type], 0) - self.pred_len - self.seq_len + 1\n            curr_len = max(0, curr_len)\n\n            self.len_seq.append(np.zeros(curr_len) + self.tot_len)\n            self.seq_id.append(np.zeros(curr_len) + i)\n            self.tot_len += curr_len\n\n        self.len_seq = np.hstack(self.len_seq)\n        self.seq_id = np.hstack(self.seq_id)\n\n        return timeseries\n\n    def __getitem__(self, index):\n        len_seq = self.len_seq[index]\n        seq_id = int(self.seq_id[index])\n        index = index - int(len_seq)\n\n        _len = self.timeseries[seq_id].shape[0]\n        train_len = _len - self.pred_len\n        if self.train_all:\n            border1s = [0, 0, train_len - self.seq_len]\n            border2s = [train_len, train_len, _len]\n        else:\n            border1s = [0, train_len - self.seq_len - self.pred_len, train_len - self.seq_len]\n            border2s = [train_len - self.pred_len, train_len, _len]\n        border2s[0] = (border2s[0] - self.seq_len) * self.percent // 100 + self.seq_len\n\n        s_begin = index + border1s[self.set_type]\n        s_end = s_begin + self.seq_len\n       
 r_begin = s_end\n        r_end = r_begin + self.pred_len\n        if self.set_type == 2:\n            s_end = -self.pred_len\n\n        data_x = self.timeseries[seq_id][s_begin:s_end]\n        data_y = self.timeseries[seq_id][r_begin:r_end]\n        data_x = np.expand_dims(data_x, axis=-1)\n        data_y = np.expand_dims(data_y, axis=-1)\n        # if self.set_type == 2:\n        #     print(\"data_x.shape = {}, data_y.shape = {}\".format(data_x.shape, data_y.shape))\n\n        return data_x, data_y, data_x, data_y\n\n    def __len__(self):\n        if self.set_type == 0:\n            # return self.tot_len\n            return min(self.max_len, self.tot_len)\n        else:\n            return self.tot_len\n"
  },
  {
    "path": "ts_forecasting_methods/ts2vec/data_provider/m4.py",
    "content": "# This source code is provided for the purposes of scientific reproducibility\n# under the following limited license from Element AI Inc. The code is an\n# implementation of the N-BEATS model (Oreshkin et al., N-BEATS: Neural basis\n# expansion analysis for interpretable time series forecasting,\n# https://arxiv.org/abs/1905.10437). The copyright to the source code is\n# licensed under the Creative Commons - Attribution-NonCommercial 4.0\n# International license (CC BY-NC 4.0):\n# https://creativecommons.org/licenses/by-nc/4.0/.  Any commercial use (whether\n# for the benefit of third parties or internally in production) requires an\n# explicit license. The subject-matter of the N-BEATS model and associated\n# materials are the property of Element AI Inc. and may be subject to patent\n# protection. No license to patents is granted hereunder (whether express or\n# implied). Copyright © 2020 Element AI Inc. All rights reserved.\n\n\"\"\"\nM4 Dataset\n\"\"\"\nimport logging\nimport os\nfrom collections import OrderedDict\nfrom dataclasses import dataclass\nfrom glob import glob\n\nimport numpy as np\nimport pandas as pd\n# import patoolib\nfrom tqdm import tqdm\nimport logging\nimport os\nimport pathlib\nimport sys\nfrom urllib import request\n\n\ndef url_file_name(url: str) -> str:\n    \"\"\"\n    Extract file name from url.\n\n    :param url: URL to extract file name from.\n    :return: File name.\n    \"\"\"\n    return url.split('/')[-1] if len(url) > 0 else ''\n\n\ndef download(url: str, file_path: str) -> None:\n    \"\"\"\n    Download a file to the given path.\n\n    :param url: URL to download\n    :param file_path: Where to download the content.\n    \"\"\"\n\n    def progress(count, block_size, total_size):\n        progress_pct = float(count * block_size) / float(total_size) * 100.0\n        sys.stdout.write('\\rDownloading {} to {} {:.1f}%'.format(url, file_path, progress_pct))\n        sys.stdout.flush()\n\n    if not 
os.path.isfile(file_path):\n        opener = request.build_opener()\n        opener.addheaders = [('User-agent', 'Mozilla/5.0')]\n        request.install_opener(opener)\n        pathlib.Path(os.path.dirname(file_path)).mkdir(parents=True, exist_ok=True)\n        f, _ = request.urlretrieve(url, file_path, progress)\n        sys.stdout.write('\\n')\n        sys.stdout.flush()\n        file_info = os.stat(f)\n        logging.info(f'Successfully downloaded {os.path.basename(file_path)} {file_info.st_size} bytes.')\n    else:\n        file_info = os.stat(file_path)\n        logging.info(f'File already exists: {file_path} {file_info.st_size} bytes.')\n\n\n@dataclass()\nclass M4Dataset:\n    ids: np.ndarray\n    groups: np.ndarray\n    frequencies: np.ndarray\n    horizons: np.ndarray\n    values: np.ndarray\n\n    @staticmethod\n    def load(training: bool = True, dataset_file: str = '../dataset/m4') -> 'M4Dataset':\n        \"\"\"\n        Load cached dataset.\n\n        :param training: Load training part if training is True, test part otherwise.\n        \"\"\"\n        info_file = os.path.join(dataset_file, 'M4-info.csv')\n        train_cache_file = os.path.join(dataset_file, 'training.npz')\n        test_cache_file = os.path.join(dataset_file, 'test.npz')\n        m4_info = pd.read_csv(info_file)\n        return M4Dataset(ids=m4_info.M4id.values,\n                         groups=m4_info.SP.values,\n                         frequencies=m4_info.Frequency.values,\n                         horizons=m4_info.Horizon.values,\n                         values=np.load(\n                             train_cache_file if training else test_cache_file,\n                             allow_pickle=True))\n\n\n@dataclass()\nclass M4Meta:\n    seasonal_patterns = ['Yearly', 'Quarterly', 'Monthly', 'Weekly', 'Daily', 'Hourly']\n    horizons = [6, 8, 18, 13, 14, 48]\n    frequencies = [1, 4, 12, 1, 1, 24]\n    horizons_map = {\n        'Yearly': 6,\n        'Quarterly': 8,\n        
'Monthly': 18,\n        'Weekly': 13,\n        'Daily': 14,\n        'Hourly': 48\n    }  # different predict length\n    frequency_map = {\n        'Yearly': 1,\n        'Quarterly': 4,\n        'Monthly': 12,\n        'Weekly': 1,\n        'Daily': 1,\n        'Hourly': 24\n    }\n    history_size = {\n        'Yearly': 1.5,\n        'Quarterly': 1.5,\n        'Monthly': 1.5,\n        'Weekly': 10,\n        'Daily': 10,\n        'Hourly': 10\n    }  # from interpretable.gin\n\n\ndef load_m4_info() -> pd.DataFrame:\n    \"\"\"\n    Load M4Info file.\n\n    :return: Pandas DataFrame of M4Info.\n    \"\"\"\n    return pd.read_csv(INFO_FILE_PATH)\n"
  },
  {
    "path": "ts_forecasting_methods/ts2vec/data_provider/metrics.py",
    "content": "import numpy as np\n\n\ndef RSE(pred, true):\n    return np.sqrt(np.sum((true - pred) ** 2)) / np.sqrt(np.sum((true - true.mean()) ** 2))\n\n\ndef CORR(pred, true):\n    u = ((true - true.mean(0)) * (pred - pred.mean(0))).sum(0)\n    d = np.sqrt(((true - true.mean(0)) ** 2 * (pred - pred.mean(0)) ** 2).sum(0))\n    return (u / d).mean(-1)\n\n\ndef MAE(pred, true):\n    return np.mean(np.abs(pred - true))\n\n\ndef MSE(pred, true):\n    return np.mean((pred - true) ** 2)\n\n\ndef RMSE(pred, true):\n    return np.sqrt(MSE(pred, true))\n\n\ndef MAPE(pred, true):\n    return np.mean(np.abs(100 * (pred - true) / (true +1e-8)))\n\n\ndef MSPE(pred, true):\n    return np.mean(np.square((pred - true) / (true + 1e-8)))\n\ndef SMAPE(pred, true):\n    return np.mean(200 * np.abs(pred - true) / (np.abs(pred) + np.abs(true) + 1e-8))\n    # return np.mean(200 * np.abs(pred - true) / (pred + true + 1e-8))\n\ndef ND(pred, true):\n    return np.mean(np.abs(true - pred)) / np.mean(np.abs(true))\n\ndef metric(pred, true):\n    mae = MAE(pred, true)\n    mse = MSE(pred, true)\n    rmse = RMSE(pred, true)\n    mape = MAPE(pred, true)\n    mspe = MSPE(pred, true)\n    smape = SMAPE(pred, true)\n    nd = ND(pred, true)\n\n    return mae, mse, rmse, mape, mspe, smape, nd\n"
  },
  {
    "path": "ts_forecasting_methods/ts2vec/data_provider/tools.py",
    "content": "import numpy as np\nimport torch\nimport torch.nn as nn\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\n\nfrom datetime import datetime\nfrom distutils.util import strtobool\nimport pandas as pd\n\nfrom .metrics import metric\n\nplt.switch_backend('agg')\n\n\ndef adjust_learning_rate(optimizer, epoch, args):\n    # lr = args.learning_rate * (0.2 ** (epoch // 2))\n    # if args.decay_fac is None:\n    #     args.decay_fac = 0.5\n    # if args.lradj == 'type1':\n    #     lr_adjust = {epoch: args.learning_rate * (args.decay_fac ** ((epoch - 1) // 1))}\n    # elif args.lradj == 'type2':\n    #     lr_adjust = {\n    #         2: 5e-5, 4: 1e-5, 6: 5e-6, 8: 1e-6,\n    #         10: 5e-7, 15: 1e-7, 20: 5e-8\n    #     }\n    if args.lradj == 'type1':\n        lr_adjust = {epoch: args.learning_rate if epoch < 3 else args.learning_rate * (0.9 ** ((epoch - 3) // 1))}\n    elif args.lradj == 'type2':\n        lr_adjust = {epoch: args.learning_rate * (args.decay_fac ** ((epoch - 1) // 1))}\n    elif args.lradj == 'type4':\n        lr_adjust = {epoch: args.learning_rate * (args.decay_fac ** ((epoch) // 1))}\n    else:\n        args.learning_rate = 1e-4\n        lr_adjust = {epoch: args.learning_rate if epoch < 3 else args.learning_rate * (0.9 ** ((epoch - 3) // 1))}\n    print(\"lr_adjust = {}\".format(lr_adjust))\n    if epoch in lr_adjust.keys():\n        lr = lr_adjust[epoch]\n        for param_group in optimizer.param_groups:\n            param_group['lr'] = lr\n        print('Updating learning rate to {}'.format(lr))\n\n\nclass EarlyStopping:\n    def __init__(self, patience=7, verbose=False, delta=0):\n        self.patience = patience\n        self.verbose = verbose\n        self.counter = 0\n        self.best_score = None\n        self.early_stop = False\n        self.val_loss_min = np.Inf\n        self.delta = delta\n\n    def __call__(self, val_loss, model, path):\n        score = -val_loss\n        if self.best_score is None:\n            
self.best_score = score\n            self.save_checkpoint(val_loss, model, path)\n        elif score < self.best_score + self.delta:\n            self.counter += 1\n            print(f'EarlyStopping counter: {self.counter} out of {self.patience}')\n            if self.counter >= self.patience:\n                self.early_stop = True\n        else:\n            self.best_score = score\n            self.save_checkpoint(val_loss, model, path)\n            self.counter = 0\n\n    def save_checkpoint(self, val_loss, model, path):\n        if self.verbose:\n            print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}).  Saving model ...')\n        torch.save(model.state_dict(), path + '/' + 'checkpoint.pth')\n        self.val_loss_min = val_loss\n\n\nclass dotdict(dict):\n    \"\"\"dot.notation access to dictionary attributes\"\"\"\n    __getattr__ = dict.get\n    __setattr__ = dict.__setitem__\n    __delattr__ = dict.__delitem__\n\n\nclass StandardScaler():\n    def __init__(self, mean, std):\n        self.mean = mean\n        self.std = std\n\n    def transform(self, data):\n        return (data - self.mean) / self.std\n\n    def inverse_transform(self, data):\n        return (data * self.std) + self.mean\n\n\ndef visual(true, preds=None, name='./pic/test.pdf'):\n    \"\"\"\n    Results visualization\n    \"\"\"\n    plt.figure()\n    plt.plot(true, label='GroundTruth', linewidth=2)\n    if preds is not None:\n        plt.plot(preds, label='Prediction', linewidth=2)\n    plt.legend()\n    plt.savefig(name, bbox_inches='tight')\n\n\ndef convert_tsf_to_dataframe(\n        full_file_path_and_name,\n        replace_missing_vals_with=\"NaN\",\n        value_column_name=\"series_value\",\n):\n    col_names = []\n    col_types = []\n    all_data = {}\n    line_count = 0\n    frequency = None\n    forecast_horizon = None\n    contain_missing_values = None\n    contain_equal_length = None\n    found_data_tag = False\n    found_data_section = False\n 
   started_reading_data_section = False\n\n    with open(full_file_path_and_name, \"r\", encoding=\"cp1252\") as file:\n        for line in file:\n            # Strip white space from start/end of line\n            line = line.strip()\n\n            if line:\n                if line.startswith(\"@\"):  # Read meta-data\n                    if not line.startswith(\"@data\"):\n                        line_content = line.split(\" \")\n                        if line.startswith(\"@attribute\"):\n                            if (\n                                    len(line_content) != 3\n                            ):  # Attributes have both name and type\n                                raise Exception(\"Invalid meta-data specification.\")\n\n                            col_names.append(line_content[1])\n                            col_types.append(line_content[2])\n                        else:\n                            if (\n                                    len(line_content) != 2\n                            ):  # Other meta-data have only values\n                                raise Exception(\"Invalid meta-data specification.\")\n\n                            if line.startswith(\"@frequency\"):\n                                frequency = line_content[1]\n                            elif line.startswith(\"@horizon\"):\n                                forecast_horizon = int(line_content[1])\n                            elif line.startswith(\"@missing\"):\n                                contain_missing_values = bool(\n                                    strtobool(line_content[1])\n                                )\n                            elif line.startswith(\"@equallength\"):\n                                contain_equal_length = bool(strtobool(line_content[1]))\n\n                    else:\n                        if len(col_names) == 0:\n                            raise Exception(\n                                \"Missing attribute section. 
Attribute section must come before data.\"\n                            )\n\n                        found_data_tag = True\n                elif not line.startswith(\"#\"):\n                    if len(col_names) == 0:\n                        raise Exception(\n                            \"Missing attribute section. Attribute section must come before data.\"\n                        )\n                    elif not found_data_tag:\n                        raise Exception(\"Missing @data tag.\")\n                    else:\n                        if not started_reading_data_section:\n                            started_reading_data_section = True\n                            found_data_section = True\n                            all_series = []\n\n                            for col in col_names:\n                                all_data[col] = []\n\n                        full_info = line.split(\":\")\n\n                        if len(full_info) != (len(col_names) + 1):\n                            raise Exception(\"Missing attributes/values in series.\")\n\n                        series = full_info[len(full_info) - 1]\n                        series = series.split(\",\")\n\n                        if len(series) == 0:\n                            raise Exception(\n                                \"A given series should contains a set of comma separated numeric values. At least one numeric value should be there in a series. Missing values should be indicated with ? 
symbol\"\n                            )\n\n                        numeric_series = []\n\n                        for val in series:\n                            if val == \"?\":\n                                numeric_series.append(replace_missing_vals_with)\n                            else:\n                                numeric_series.append(float(val))\n\n                        if numeric_series.count(replace_missing_vals_with) == len(\n                                numeric_series\n                        ):\n                            raise Exception(\n                                \"All series values are missing. A given series should contains a set of comma separated numeric values. At least one numeric value should be there in a series.\"\n                            )\n\n                        all_series.append(pd.Series(numeric_series).array)\n\n                        for i in range(len(col_names)):\n                            att_val = None\n                            if col_types[i] == \"numeric\":\n                                att_val = int(full_info[i])\n                            elif col_types[i] == \"string\":\n                                att_val = str(full_info[i])\n                            elif col_types[i] == \"date\":\n                                att_val = datetime.strptime(\n                                    full_info[i], \"%Y-%m-%d %H-%M-%S\"\n                                )\n                            else:\n                                raise Exception(\n                                    \"Invalid attribute type.\"\n                                )  # Currently, the code supports only numeric, string and date types. 
Extend this as required.\n\n                            if att_val is None:\n                                raise Exception(\"Invalid attribute value.\")\n                            else:\n                                all_data[col_names[i]].append(att_val)\n\n                line_count = line_count + 1\n\n        if line_count == 0:\n            raise Exception(\"Empty file.\")\n        if len(col_names) == 0:\n            raise Exception(\"Missing attribute section.\")\n        if not found_data_section:\n            raise Exception(\"Missing series information under data section.\")\n\n        all_data[value_column_name] = all_series\n        loaded_data = pd.DataFrame(all_data)\n\n        return (\n            loaded_data,\n            frequency,\n            forecast_horizon,\n            contain_missing_values,\n            contain_equal_length,\n        )\n\n\ndef vali(model, vali_data, vali_loader, criterion, args, device, itr):\n    total_loss = []\n    if args.model == 'PatchTST' or args.model == 'DLinear' or args.model == 'TCN':\n        model.eval()\n    else:\n        model.in_layer.eval()\n        model.out_layer.eval()\n    with torch.no_grad():\n        for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in tqdm(enumerate(vali_loader)):\n            batch_x = batch_x.float().to(device)\n            batch_y = batch_y.float()\n\n            batch_x_mark = batch_x_mark.float().to(device)\n            batch_y_mark = batch_y_mark.float().to(device)\n\n            outputs = model(batch_x, itr)\n\n            # encoder - decoder\n            outputs = outputs[:, -args.pred_len:, :]\n            batch_y = batch_y[:, -args.pred_len:, :].to(device)\n\n            pred = outputs.detach().cpu()\n            true = batch_y.detach().cpu()\n\n            loss = criterion(pred, true)\n\n            total_loss.append(loss)\n    total_loss = np.average(total_loss)\n    if args.model == 'PatchTST' or args.model == 'DLinear' or args.model == 'TCN':\n        
model.train()\n    else:\n        model.in_layer.train()\n        model.out_layer.train()\n    return total_loss\n\n\ndef MASE(x, freq, pred, true):\n    masep = np.mean(np.abs(x[:, freq:] - x[:, :-freq]))\n    return np.mean(np.abs(pred - true) / (masep + 1e-8))\n\n\ndef test(model, test_data, test_loader, args, device, itr):\n    preds = []\n    trues = []\n    # mases = []\n\n    model.eval()\n    with torch.no_grad():\n        for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in tqdm(enumerate(test_loader)):\n            # outputs_np = batch_x.cpu().numpy()\n            # np.save(\"emb_test/ETTh2_192_test_input_itr{}_{}.npy\".format(itr, i), outputs_np)\n            # outputs_np = batch_y.cpu().numpy()\n            # np.save(\"emb_test/ETTh2_192_test_true_itr{}_{}.npy\".format(itr, i), outputs_np)\n\n            batch_x = batch_x.float().to(device)\n            batch_y = batch_y.float()\n\n            outputs = model(batch_x[:, -args.seq_len:, :], itr)\n\n            # encoder - decoder\n            outputs = outputs[:, -args.pred_len:, :]\n            batch_y = batch_y[:, -args.pred_len:, :].to(device)\n\n            pred = outputs.detach().cpu().numpy()\n            true = batch_y.detach().cpu().numpy()\n\n            preds.append(pred)\n            trues.append(true)\n\n    preds = np.array(preds)\n    trues = np.array(trues)\n    # mases = np.mean(np.array(mases))\n    print('test shape:', preds.shape, trues.shape)\n    preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1])\n    trues = trues.reshape(-1, trues.shape[-2], trues.shape[-1])\n    print('test shape:', preds.shape, trues.shape)\n\n    mae, mse, rmse, mape, mspe, smape, nd = metric(preds, trues)\n    # print('mae:{:.4f}, mse:{:.4f}, rmse:{:.4f}, smape:{:.4f}, mases:{:.4f}'.format(mae, mse, rmse, smape, mases))\n    print('mae:{:.4f}, mse:{:.4f}, rmse:{:.4f}, smape:{:.4f}'.format(mae, mse, rmse, smape))\n\n    return mse, mae\n"
  },
  {
    "path": "ts_forecasting_methods/ts2vec/data_provider/uea.py",
    "content": "import os\nimport numpy as np\nimport pandas as pd\nimport torch\n\n\ndef collate_fn(data, max_len=None):\n    \"\"\"Build mini-batch tensors from a list of (X, mask) tuples. Mask input. Create\n    Args:\n        data: len(batch_size) list of tuples (X, y).\n            - X: torch tensor of shape (seq_length, feat_dim); variable seq_length.\n            - y: torch tensor of shape (num_labels,) : class indices or numerical targets\n                (for classification or regression, respectively). num_labels > 1 for multi-task models\n        max_len: global fixed sequence length. Used for architectures requiring fixed length input,\n            where the batch length cannot vary dynamically. Longer sequences are clipped, shorter are padded with 0s\n    Returns:\n        X: (batch_size, padded_length, feat_dim) torch tensor of masked features (input)\n        targets: (batch_size, padded_length, feat_dim) torch tensor of unmasked features (output)\n        target_masks: (batch_size, padded_length, feat_dim) boolean torch tensor\n            0 indicates masked values to be predicted, 1 indicates unaffected/\"active\" feature values\n        padding_masks: (batch_size, padded_length) boolean tensor, 1 means keep vector at this position, 0 means padding\n    \"\"\"\n\n    batch_size = len(data)\n    features, labels = zip(*data)\n\n    # Stack and pad features and masks (convert 2D to 3D tensors, i.e. 
add batch dimension)\n    lengths = [X.shape[0] for X in features]  # original sequence length for each time series\n    if max_len is None:\n        max_len = max(lengths)\n\n    X = torch.zeros(batch_size, max_len, features[0].shape[-1])  # (batch_size, padded_length, feat_dim)\n    for i in range(batch_size):\n        end = min(lengths[i], max_len)\n        X[i, :end, :] = features[i][:end, :]\n\n    targets = torch.stack(labels, dim=0)  # (batch_size, num_labels)\n\n    padding_masks = padding_mask(torch.tensor(lengths, dtype=torch.int16),\n                                 max_len=max_len)  # (batch_size, padded_length) boolean tensor, \"1\" means keep\n\n    return X, targets, padding_masks\n\n\ndef padding_mask(lengths, max_len=None):\n    \"\"\"\n    Used to mask padded positions: creates a (batch_size, max_len) boolean mask from a tensor of sequence lengths,\n    where 1 means keep element at this position (time step)\n    \"\"\"\n    batch_size = lengths.numel()\n    max_len = max_len or lengths.max_val()  # trick works because of overloading of 'or' operator for non-boolean types\n    return (torch.arange(0, max_len, device=lengths.device)\n            .type_as(lengths)\n            .repeat(batch_size, 1)\n            .lt(lengths.unsqueeze(1)))\n\n\nclass Normalizer(object):\n    \"\"\"\n    Normalizes dataframe across ALL contained rows (time steps). Different from per-sample normalization.\n    \"\"\"\n\n    def __init__(self, norm_type='standardization', mean=None, std=None, min_val=None, max_val=None):\n        \"\"\"\n        Args:\n            norm_type: choose from:\n                \"standardization\", \"minmax\": normalizes dataframe across ALL contained rows (time steps)\n                \"per_sample_std\", \"per_sample_minmax\": normalizes each sample separately (i.e. 
across only its own rows)\n            mean, std, min_val, max_val: optional (num_feat,) Series of pre-computed values\n        \"\"\"\n\n        self.norm_type = norm_type\n        self.mean = mean\n        self.std = std\n        self.min_val = min_val\n        self.max_val = max_val\n\n    def normalize(self, df):\n        \"\"\"\n        Args:\n            df: input dataframe\n        Returns:\n            df: normalized dataframe\n        \"\"\"\n        if self.norm_type == \"standardization\":\n            if self.mean is None:\n                self.mean = df.mean()\n                self.std = df.std()\n            return (df - self.mean) / (self.std + np.finfo(float).eps)\n\n        elif self.norm_type == \"minmax\":\n            if self.max_val is None:\n                self.max_val = df.max()\n                self.min_val = df.min()\n            return (df - self.min_val) / (self.max_val - self.min_val + np.finfo(float).eps)\n\n        elif self.norm_type == \"per_sample_std\":\n            grouped = df.groupby(by=df.index)\n            return (df - grouped.transform('mean')) / grouped.transform('std')\n\n        elif self.norm_type == \"per_sample_minmax\":\n            grouped = df.groupby(by=df.index)\n            min_vals = grouped.transform('min')\n            return (df - min_vals) / (grouped.transform('max') - min_vals + np.finfo(float).eps)\n\n        else:\n            raise (NameError(f'Normalize method \"{self.norm_type}\" not implemented'))\n\n\ndef interpolate_missing(y):\n    \"\"\"\n    Replaces NaN values in pd.Series `y` using linear interpolation\n    \"\"\"\n    if y.isna().any():\n        y = y.interpolate(method='linear', limit_direction='both')\n    return y\n\n\ndef subsample(y, limit=256, factor=2):\n    \"\"\"\n    If a given Series is longer than `limit`, returns subsampled sequence by the specified integer factor\n    \"\"\"\n    if len(y) > limit:\n        return y[::factor].reset_index(drop=True)\n    return y\n"
  },
  {
    "path": "ts_forecasting_methods/ts2vec/datautils.py",
    "content": "import os\nimport numpy as np\nimport pandas as pd\nimport math\nimport random\nfrom datetime import datetime\nimport pickle\nfrom ts2vec.utils import pkl_load, pad_nan_to_target\nfrom scipy.io.arff import loadarff\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\n\ndef load_UCR(dataset):\n    train_file = os.path.join('datasets/UCR', dataset, dataset + \"_TRAIN.tsv\")\n    test_file = os.path.join('datasets/UCR', dataset, dataset + \"_TEST.tsv\")\n    train_df = pd.read_csv(train_file, sep='\\t', header=None)\n    test_df = pd.read_csv(test_file, sep='\\t', header=None)\n    train_array = np.array(train_df)\n    test_array = np.array(test_df)\n\n    # Move the labels to {0, ..., L-1}\n    labels = np.unique(train_array[:, 0])\n    transform = {}\n    for i, l in enumerate(labels):\n        transform[l] = i\n\n    train = train_array[:, 1:].astype(np.float64)\n    train_labels = np.vectorize(transform.get)(train_array[:, 0])\n    test = test_array[:, 1:].astype(np.float64)\n    test_labels = np.vectorize(transform.get)(test_array[:, 0])\n\n    # Normalization for non-normalized datasets\n    # To keep the amplitude information, we do not normalize values over\n    # individual time series, but on the whole dataset\n    if dataset not in [\n        'AllGestureWiimoteX',\n        'AllGestureWiimoteY',\n        'AllGestureWiimoteZ',\n        'BME',\n        'Chinatown',\n        'Crop',\n        'EOGHorizontalSignal',\n        'EOGVerticalSignal',\n        'Fungi',\n        'GestureMidAirD1',\n        'GestureMidAirD2',\n        'GestureMidAirD3',\n        'GesturePebbleZ1',\n        'GesturePebbleZ2',\n        'GunPointAgeSpan',\n        'GunPointMaleVersusFemale',\n        'GunPointOldVersusYoung',\n        'HouseTwenty',\n        'InsectEPGRegularTrain',\n        'InsectEPGSmallTrain',\n        'MelbournePedestrian',\n        'PickupGestureWiimoteZ',\n        'PigAirwayPressure',\n        'PigArtPressure',\n        'PigCVP',\n        
'PLAID',\n        'PowerCons',\n        'Rock',\n        'SemgHandGenderCh2',\n        'SemgHandMovementCh2',\n        'SemgHandSubjectCh2',\n        'ShakeGestureWiimoteZ',\n        'SmoothSubspace',\n        'UMD'\n    ]:\n        return train[..., np.newaxis], train_labels, test[..., np.newaxis], test_labels\n    \n    mean = np.nanmean(train)\n    std = np.nanstd(train)\n    train = (train - mean) / std\n    test = (test - mean) / std\n    return train[..., np.newaxis], train_labels, test[..., np.newaxis], test_labels\n\n\ndef load_UEA(dataset):\n    train_data = loadarff(f'datasets/UEA/{dataset}/{dataset}_TRAIN.arff')[0]\n    test_data = loadarff(f'datasets/UEA/{dataset}/{dataset}_TEST.arff')[0]\n    \n    def extract_data(data):\n        res_data = []\n        res_labels = []\n        for t_data, t_label in data:\n            t_data = np.array([ d.tolist() for d in t_data ])\n            t_label = t_label.decode(\"utf-8\")\n            res_data.append(t_data)\n            res_labels.append(t_label)\n        return np.array(res_data).swapaxes(1, 2), np.array(res_labels)\n    \n    train_X, train_y = extract_data(train_data)\n    test_X, test_y = extract_data(test_data)\n    \n    scaler = StandardScaler()\n    scaler.fit(train_X.reshape(-1, train_X.shape[-1]))\n    train_X = scaler.transform(train_X.reshape(-1, train_X.shape[-1])).reshape(train_X.shape)\n    test_X = scaler.transform(test_X.reshape(-1, test_X.shape[-1])).reshape(test_X.shape)\n    \n    labels = np.unique(train_y)\n    transform = { k : i for i, k in enumerate(labels)}\n    train_y = np.vectorize(transform.get)(train_y)\n    test_y = np.vectorize(transform.get)(test_y)\n    return train_X, train_y, test_X, test_y\n    \n    \ndef load_forecast_npy(name, univar=False):\n    data = np.load(f'datasets/{name}.npy')    \n    if univar:\n        data = data[: -1:]\n        \n    train_slice = slice(None, int(0.6 * len(data)))\n    valid_slice = slice(int(0.6 * len(data)), int(0.8 * len(data)))\n    
test_slice = slice(int(0.8 * len(data)), None)\n    \n    scaler = StandardScaler().fit(data[train_slice])\n    data = scaler.transform(data)\n    data = np.expand_dims(data, 0)\n\n    pred_lens = [24, 48, 96, 288, 672]\n    return data, train_slice, valid_slice, test_slice, scaler, pred_lens, 0\n\n\ndef _get_time_features(dt):\n    return np.stack([\n        dt.minute.to_numpy(),\n        dt.hour.to_numpy(),\n        dt.dayofweek.to_numpy(),\n        dt.day.to_numpy(),\n        dt.dayofyear.to_numpy(),\n        dt.month.to_numpy(),\n        dt.weekofyear.to_numpy(),\n    ], axis=1).astype(np.float)\n\n\ndef load_forecast_csv(name, univar=False):\n    # parser.add_argument('--root_path', type=str, default='/SSD/lz/ts_forecasting_methods/ts2vec/datasets',\n    #                     help='root path of the data file')\n    data = pd.read_csv(f'/SSD/lz/ts_forecasting_methods/ts2vec/datasets/{name}.csv', index_col='date', parse_dates=True)\n\n    print(\"raw dataset.shape = \", data.shape)\n    dt_embed = _get_time_features(data.index)\n    n_covariate_cols = dt_embed.shape[-1]\n    \n    if univar:\n        if name in ('ETTh1', 'ETTh2', 'ETTm1', 'ETTm2'):\n            data = data[['OT']]\n        elif name == 'electricity':\n            data = data[['MT_001']]\n        else:\n            data = data.iloc[:, -1:]\n        \n    data = data.to_numpy()\n    if name == 'ETTh1' or name == 'ETTh2':\n        train_slice = slice(None, 12*30*24)\n        valid_slice = slice(12*30*24, 16*30*24)\n        test_slice = slice(16*30*24, 20*30*24)\n    elif name == 'ETTm1' or name == 'ETTm2':\n        train_slice = slice(None, 12*30*24*4)\n        valid_slice = slice(12*30*24*4, 16*30*24*4)\n        test_slice = slice(16*30*24*4, 20*30*24*4)\n    else:\n        train_slice = slice(None, int(0.6 * len(data)))\n        valid_slice = slice(int(0.6 * len(data)), int(0.8 * len(data)))\n        test_slice = slice(int(0.8 * len(data)), None)\n    \n    scaler = 
StandardScaler().fit(data[train_slice])\n    print(\"data[train_slice].shape = \", data[train_slice].shape)\n    print(\"pre0 data.shape = \", data.shape)\n    data = scaler.transform(data)\n\n    print(\"pre dataset.shape = \", data.shape)\n    print(\"train val test slice = \", train_slice, valid_slice, test_slice)\n    if name in ('electricity'):\n        data = np.expand_dims(data.T, -1)  # Each variable is an instance rather than a feature\n    else:\n        data = np.expand_dims(data, 0)\n    print(\"pre2 dataset.shape = \", data.shape)\n    if n_covariate_cols > 0:\n        dt_scaler = StandardScaler().fit(dt_embed[train_slice])\n        print(\"dt_embed.shape = \", dt_embed.shape)\n        dt_embed = np.expand_dims(dt_scaler.transform(dt_embed), 0)\n        print(\"22 dt_embed.shape = \", dt_embed.shape)\n        data = np.concatenate([np.repeat(dt_embed, data.shape[0], axis=0), data], axis=-1)\n    print(\"pre3 dt_embed dataset.shape = \", data.shape)\n    if name in ('ETTh1', 'ETTh2', 'electricity'):\n        pred_lens = [24, 48, 168, 336, 720]\n    else:\n        pred_lens = [24, 48, 96, 288, 672]\n        \n    return data, train_slice, valid_slice, test_slice, scaler, pred_lens, n_covariate_cols\n\n\ndef load_anomaly(name):\n    res = pkl_load(f'datasets/{name}.pkl')\n    return res['all_train_data'], res['all_train_labels'], res['all_train_timestamps'], \\\n           res['all_test_data'],  res['all_test_labels'],  res['all_test_timestamps'], \\\n           res['delay']\n\n\ndef gen_ano_train_data(all_train_data):\n    maxl = np.max([ len(all_train_data[k]) for k in all_train_data ])\n    pretrain_data = []\n    for k in all_train_data:\n        train_data = pad_nan_to_target(all_train_data[k], maxl, axis=0)\n        pretrain_data.append(train_data)\n    pretrain_data = np.expand_dims(np.stack(pretrain_data), 2)\n    return pretrain_data\n"
  },
  {
    "path": "ts_forecasting_methods/ts2vec/forecasting_datasets_load_test.py",
    "content": "from data_provider.data_factory import data_provider\n\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch import optim\n\nimport os\nimport time\n\nimport warnings\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport argparse\nimport random\n\nwarnings.filterwarnings('ignore')\n\nfix_seed = 2021\nrandom.seed(fix_seed)\ntorch.manual_seed(fix_seed)\nnp.random.seed(fix_seed)\n\nparser = argparse.ArgumentParser(description='GPT4TS')\n\n# basic config\nparser.add_argument('--task_name', type=str, default='long_term_forecast',\n                    help='task name, options:[long_term_forecast, short_term_forecast, imputation, classification, anomaly_detection]')\n\nparser.add_argument('--freq', type=str, default='m',\n                    help='freq for time features encoding, options:[s:secondly, t:minutely, h:hourly, d:daily, b:business days, w:weekly, m:monthly], '\n                         'you can also use more detailed freq like 15min or 3h')\n# parser.add_argument('--checkpoints', type=str, default='./checkpoints/', help='location of model checkpoints')\n\n# parser.add_argument('--model_id', type=str, required=True, default='test')\nparser.add_argument('--checkpoints', type=str, default='./checkpoints/')\n\n# forecasting task\nparser.add_argument('--seq_len', type=int, default=96, help='input sequence length')\nparser.add_argument('--label_len', type=int, default=48, help='start token length')\nparser.add_argument('--pred_len', type=int, default=96, help='prediction sequence length')\nparser.add_argument('--seasonal_patterns', type=str, default='Monthly', help='subset for M4')\nparser.add_argument('--inverse', action='store_true', help='inverse output data', default=False)\n\nparser.add_argument('--root_path', type=str, default='./datasets/')\nparser.add_argument('--data_path', type=str, default='national_illness.csv')   ## ETTh1 ETTh2 ETTm1  ETTm2 electricity traffic weather exchange_rate 
national_illness\nparser.add_argument('--data', type=str, default='custom')  ## ett_h  ett_m custom\n\n# 'custom': Dataset_Custom,\n#     'tsf_data': Dataset_TSF,\n#     'ett_h': Dataset_ETT_hour,\n#     'ett_m': Dataset_ETT_minute,\n\nparser.add_argument('--features', type=str, default='M')\n# parser.add_argument('--freq', type=int, default=1)\nparser.add_argument('--target', type=str, default='OT')\nparser.add_argument('--embed', type=str, default='timeF')\nparser.add_argument('--percent', type=int, default=10)\n\n# parser.add_argument('--seq_len', type=int, default=512)\n# parser.add_argument('--pred_len', type=int, default=96)\n# parser.add_argument('--label_len', type=int, default=48)\n\nparser.add_argument('--decay_fac', type=float, default=0.75)\nparser.add_argument('--learning_rate', type=float, default=0.0001)\nparser.add_argument('--batch_size', type=int, default=512)\nparser.add_argument('--num_workers', type=int, default=10)\nparser.add_argument('--train_epochs', type=int, default=10)\nparser.add_argument('--lradj', type=str, default='type1')\nparser.add_argument('--patience', type=int, default=3)\n\nparser.add_argument('--gpt_layers', type=int, default=3)\nparser.add_argument('--is_gpt', type=int, default=1)\nparser.add_argument('--e_layers', type=int, default=3)\nparser.add_argument('--d_model', type=int, default=768)\nparser.add_argument('--n_heads', type=int, default=16)\nparser.add_argument('--d_ff', type=int, default=512)\nparser.add_argument('--dropout', type=float, default=0.2)\nparser.add_argument('--enc_in', type=int, default=862)\nparser.add_argument('--c_out', type=int, default=862)\nparser.add_argument('--patch_size', type=int, default=16)\nparser.add_argument('--kernel_size', type=int, default=25)\n\nparser.add_argument('--loss_func', type=str, default='mse')\nparser.add_argument('--pretrain', type=int, default=1)\nparser.add_argument('--freeze', type=int, default=1)\nparser.add_argument('--model', type=str, 
default='model')\nparser.add_argument('--stride', type=int, default=8)\nparser.add_argument('--max_len', type=int, default=-1)\nparser.add_argument('--hid_dim', type=int, default=16)\nparser.add_argument('--tmax', type=int, default=10)\n\nparser.add_argument('--itr', type=int, default=3)\nparser.add_argument('--cos', type=int, default=0)\n\n\n\nargs = parser.parse_args()\n\nSEASONALITY_MAP = {\n   \"minutely\": 1440,\n   \"10_minutes\": 144,\n   \"half_hourly\": 48,\n   \"hourly\": 24,\n   \"daily\": 7,\n   \"weekly\": 1,\n   \"monthly\": 12,\n   \"quarterly\": 4,\n   \"yearly\": 1\n}\n\ntrain_data, train_loader = data_provider(args, 'train')\nvali_data, vali_loader = data_provider(args, 'val')\ntest_data, test_loader = data_provider(args, 'test')\n\nprint(\"dataset name = \", args.data_path)\n\nprint(\"type train_data = \", type(train_data))\n\nprint(\"train_data = \", train_data)\nprint(train_data.data_x.shape, train_data.data_y.shape)\n\nprint(\"train_data = \", train_data)\nprint(vali_data.data_x.shape, vali_data.data_y.shape)\n\nprint(\"train_data = \", train_data)\nprint(test_data.data_x.shape, test_data.data_y.shape)\n\n\n#\n# print(\"train_data.shape = \", train_data.shape)\n# print(\"vali_data.shape = \", vali_data.shape)\n# print(\"test_data.shape = \", test_data.shape)"
  },
  {
    "path": "ts_forecasting_methods/ts2vec/models/__init__.py",
    "content": "from .encoder import TSEncoder\n"
  },
  {
    "path": "ts_forecasting_methods/ts2vec/models/dilated_conv.py",
    "content": "import torch\nfrom torch import nn\nimport torch.nn.functional as F\nimport numpy as np\n\nclass SamePadConv(nn.Module):\n    def __init__(self, in_channels, out_channels, kernel_size, dilation=1, groups=1):\n        super().__init__()\n        self.receptive_field = (kernel_size - 1) * dilation + 1\n        padding = self.receptive_field // 2\n        self.conv = nn.Conv1d(\n            in_channels, out_channels, kernel_size,\n            padding=padding,\n            dilation=dilation,\n            groups=groups\n        )\n        self.remove = 1 if self.receptive_field % 2 == 0 else 0\n        \n    def forward(self, x):\n        out = self.conv(x)\n        if self.remove > 0:\n            out = out[:, :, : -self.remove]\n        return out\n    \nclass ConvBlock(nn.Module):\n    def __init__(self, in_channels, out_channels, kernel_size, dilation, final=False):\n        super().__init__()\n        self.conv1 = SamePadConv(in_channels, out_channels, kernel_size, dilation=dilation)\n        self.conv2 = SamePadConv(out_channels, out_channels, kernel_size, dilation=dilation)\n        self.projector = nn.Conv1d(in_channels, out_channels, 1) if in_channels != out_channels or final else None\n    \n    def forward(self, x):\n        residual = x if self.projector is None else self.projector(x)\n        x = F.gelu(x)\n        x = self.conv1(x)\n        x = F.gelu(x)\n        x = self.conv2(x)\n        return x + residual\n\nclass DilatedConvEncoder(nn.Module):\n    def __init__(self, in_channels, channels, kernel_size):\n        super().__init__()\n        self.net = nn.Sequential(*[\n            ConvBlock(\n                channels[i-1] if i > 0 else in_channels,\n                channels[i],\n                kernel_size=kernel_size,\n                dilation=2**i,\n                final=(i == len(channels)-1)\n            )\n            for i in range(len(channels))\n        ])\n        \n    def forward(self, x):\n        return self.net(x)\n"
  },
  {
    "path": "ts_forecasting_methods/ts2vec/models/encoder.py",
    "content": "import torch\nfrom torch import nn\nimport torch.nn.functional as F\nimport numpy as np\nfrom .dilated_conv import DilatedConvEncoder\n\ndef generate_continuous_mask(B, T, n=5, l=0.1):\n    res = torch.full((B, T), True, dtype=torch.bool)\n    if isinstance(n, float):\n        n = int(n * T)\n    n = max(min(n, T // 2), 1)\n    \n    if isinstance(l, float):\n        l = int(l * T)\n    l = max(l, 1)\n    \n    for i in range(B):\n        for _ in range(n):\n            t = np.random.randint(T-l+1)\n            res[i, t:t+l] = False\n    return res\n\ndef generate_binomial_mask(B, T, p=0.5):\n    return torch.from_numpy(np.random.binomial(1, p, size=(B, T))).to(torch.bool)\n\nclass TSEncoder(nn.Module):\n    def __init__(self, input_dims, output_dims, hidden_dims=64, depth=10, mask_mode='binomial'):\n        super().__init__()\n        self.input_dims = input_dims\n        self.output_dims = output_dims\n        self.hidden_dims = hidden_dims\n        self.mask_mode = mask_mode\n        self.input_fc = nn.Linear(input_dims, hidden_dims)\n        self.feature_extractor = DilatedConvEncoder(\n            hidden_dims,\n            [hidden_dims] * depth + [output_dims],\n            kernel_size=3\n        )\n        self.repr_dropout = nn.Dropout(p=0.1)\n        \n    def forward(self, x, mask=None):  # x: B x T x input_dims\n        nan_mask = ~x.isnan().any(axis=-1)\n        x[~nan_mask] = 0\n        x = self.input_fc(x)  # B x T x Ch\n        \n        # generate & apply mask\n        if mask is None:\n            if self.training:\n                mask = self.mask_mode\n            else:\n                mask = 'all_true'\n        \n        if mask == 'binomial':\n            mask = generate_binomial_mask(x.size(0), x.size(1)).to(x.device)\n        elif mask == 'continuous':\n            mask = generate_continuous_mask(x.size(0), x.size(1)).to(x.device)\n        elif mask == 'all_true':\n            mask = x.new_full((x.size(0), x.size(1)), True, 
dtype=torch.bool)\n        elif mask == 'all_false':\n            mask = x.new_full((x.size(0), x.size(1)), False, dtype=torch.bool)\n        elif mask == 'mask_last':\n            mask = x.new_full((x.size(0), x.size(1)), True, dtype=torch.bool)\n            mask[:, -1] = False\n        \n        mask &= nan_mask\n        x[~mask] = 0\n        \n        # conv encoder\n        x = x.transpose(1, 2)  # B x Ch x T\n        x = self.repr_dropout(self.feature_extractor(x))  # B x Co x T\n        x = x.transpose(1, 2)  # B x T x Co\n        \n        return x\n        "
  },
  {
    "path": "ts_forecasting_methods/ts2vec/models/losses.py",
    "content": "import torch\nfrom torch import nn\nimport torch.nn.functional as F\n\ndef hierarchical_contrastive_loss(z1, z2, alpha=0.5, temporal_unit=0):\n    loss = torch.tensor(0., device=z1.device)\n    d = 0\n    while z1.size(1) > 1:\n        if alpha != 0:\n            loss += alpha * instance_contrastive_loss(z1, z2)\n        if d >= temporal_unit:\n            if 1 - alpha != 0:\n                loss += (1 - alpha) * temporal_contrastive_loss(z1, z2)\n        d += 1\n        z1 = F.max_pool1d(z1.transpose(1, 2), kernel_size=2).transpose(1, 2)\n        z2 = F.max_pool1d(z2.transpose(1, 2), kernel_size=2).transpose(1, 2)\n    if z1.size(1) == 1:\n        if alpha != 0:\n            loss += alpha * instance_contrastive_loss(z1, z2)\n        d += 1\n    return loss / d\n\ndef instance_contrastive_loss(z1, z2):\n    B, T = z1.size(0), z1.size(1)\n    if B == 1:\n        return z1.new_tensor(0.)\n    z = torch.cat([z1, z2], dim=0)  # 2B x T x C\n    z = z.transpose(0, 1)  # T x 2B x C\n    sim = torch.matmul(z, z.transpose(1, 2))  # T x 2B x 2B\n    logits = torch.tril(sim, diagonal=-1)[:, :, :-1]    # T x 2B x (2B-1)\n    logits += torch.triu(sim, diagonal=1)[:, :, 1:]\n    logits = -F.log_softmax(logits, dim=-1)\n    \n    i = torch.arange(B, device=z1.device)\n    loss = (logits[:, i, B + i - 1].mean() + logits[:, B + i, i].mean()) / 2\n    return loss\n\ndef temporal_contrastive_loss(z1, z2):\n    B, T = z1.size(0), z1.size(1)\n    if T == 1:\n        return z1.new_tensor(0.)\n    z = torch.cat([z1, z2], dim=1)  # B x 2T x C\n    sim = torch.matmul(z, z.transpose(1, 2))  # B x 2T x 2T\n    logits = torch.tril(sim, diagonal=-1)[:, :, :-1]    # B x 2T x (2T-1)\n    logits += torch.triu(sim, diagonal=1)[:, :, 1:]\n    logits = -F.log_softmax(logits, dim=-1)\n    \n    t = torch.arange(T, device=z1.device)\n    loss = (logits[:, t, T + t - 1].mean() + logits[:, T + t, t].mean()) / 2\n    return loss\n"
  },
  {
    "path": "ts_forecasting_methods/ts2vec/requirements.txt",
    "content": "Bottleneck==1.3.2\ntorch==1.8.1\nscipy==1.6.1\nnumpy==1.19.2\nstatsmodels==0.12.2\npandas==1.0.1\nscikit_learn==0.24.2\n"
  },
  {
    "path": "ts_forecasting_methods/ts2vec/scripts/electricity.sh",
    "content": "# multivar\nfor seed in $(seq 0 4 11 22 43); do\n  python -u train.py electricity forecast_multivar --loader forecast_csv --repr-dims 320 --max-threads 8 --seed ${seed} --eval\ndone\n\n"
  },
  {
    "path": "ts_forecasting_methods/ts2vec/scripts/ett.sh",
    "content": "for seed in $(seq 0 4 11 22 43); do\n  # multivar\n  python -u train.py ETTh1 forecast_multivar --loader forecast_csv --repr-dims 320 --max-threads 8 --seed ${seed} --eval\n  python -u train.py ETTh2 forecast_multivar --loader forecast_csv --repr-dims 320 --max-threads 8 --seed ${seed} --eval\n  python -u train.py ETTm1 forecast_multivar --loader forecast_csv --repr-dims 320 --max-threads 8 --seed ${seed} --eval\ndone"
  },
  {
    "path": "ts_forecasting_methods/ts2vec/scripts/kpi.sh",
    "content": "python -u train.py kpi anomaly_0 --loader anomaly --repr-dims 320 --max-threads 8 --seed 1 --eval\npython -u train.py kpi anomaly_1 --loader anomaly --repr-dims 320 --max-threads 8 --seed 2 --eval\npython -u train.py kpi anomaly_2 --loader anomaly --repr-dims 320 --max-threads 8 --seed 3 --eval\n\npython -u train.py kpi anomaly_coldstart_0 --loader anomaly_coldstart --repr-dims 320 --max-threads 8 --seed 1 --eval\npython -u train.py kpi anomaly_coldstart_1 --loader anomaly_coldstart --repr-dims 320 --max-threads 8 --seed 2 --eval\npython -u train.py kpi anomaly_coldstart_2 --loader anomaly_coldstart --repr-dims 320 --max-threads 8 --seed 3 --eval\n"
  },
  {
    "path": "ts_forecasting_methods/ts2vec/scripts/ucr.sh",
    "content": "python -u train.py Chinatown UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py SonyAIBORobotSurface1 UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py ItalyPowerDemand UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py MoteStrain UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py SonyAIBORobotSurface2 UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py TwoLeadECG UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py SmoothSubspace UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py ECGFiveDays UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py Fungi UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py CBF UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py BME UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py UMD UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py DiatomSizeReduction UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py DodgerLoopWeekend UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py DodgerLoopGame UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py GunPoint UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py Coffee UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py FaceFour UCR --loader UCR --batch-size 8 
--repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py FreezerSmallTrain UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py ArrowHead UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py ECG200 UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py Symbols UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py ShapeletSim UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py InsectEPGSmallTrain UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py BeetleFly UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py BirdChicken UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py ToeSegmentation1 UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py ToeSegmentation2 UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py Wine UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py Beef UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py Plane UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py OliveOil UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py SyntheticControl UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py PickupGestureWiimoteZ UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py ShakeGestureWiimoteZ UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u 
train.py GunPointMaleVersusFemale UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py GunPointAgeSpan UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py GunPointOldVersusYoung UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py Lightning7 UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py DodgerLoopDay UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py PowerCons UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py FacesUCR UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py Meat UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py Trace UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py MelbournePedestrian UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py MiddlePhalanxTW UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py DistalPhalanxOutlineAgeGroup UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py MiddlePhalanxOutlineAgeGroup UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py ProximalPhalanxTW UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py ProximalPhalanxOutlineAgeGroup UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py DistalPhalanxTW UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py Herring UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython 
-u train.py Car UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py InsectEPGRegularTrain UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py MedicalImages UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py Lightning2 UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py FreezerRegularTrain UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py Ham UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py MiddlePhalanxOutlineCorrect UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py DistalPhalanxOutlineCorrect UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py ProximalPhalanxOutlineCorrect UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py Mallat UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py InsectWingbeatSound UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py Rock UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py GesturePebbleZ1 UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py SwedishLeaf UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py CinCECGTorso UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py GesturePebbleZ2 UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py Adiac UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py ECG5000 UCR 
--loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py WordSynonyms UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py FaceAll UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py GestureMidAirD2 UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py GestureMidAirD3 UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py GestureMidAirD1 UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py ChlorineConcentration UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py HouseTwenty UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py Fish UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py OSULeaf UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py MixedShapesSmallTrain UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py CricketZ UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py CricketX UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py CricketY UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py FiftyWords UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py Yoga UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py TwoPatterns UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py PhalangesOutlinesCorrect UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 
--seed 42 --eval\npython -u train.py Strawberry UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py ACSF1 UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py AllGestureWiimoteY UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py AllGestureWiimoteX UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py AllGestureWiimoteZ UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py Wafer UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py WormsTwoClass UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py Worms UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py Earthquakes UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py Haptics UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py Computers UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py InlineSkate UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py PigArtPressure UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py PigCVP UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py PigAirwayPressure UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py Phoneme UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py ScreenType UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py LargeKitchenAppliances UCR --loader UCR 
--batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py SmallKitchenAppliances UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py RefrigerationDevices UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py UWaveGestureLibraryZ UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py UWaveGestureLibraryY UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py UWaveGestureLibraryX UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py ShapesAll UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py Crop UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py SemgHandGenderCh2 UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py EOGVerticalSignal UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py EOGHorizontalSignal UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py MixedShapesRegularTrain UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py SemgHandMovementCh2 UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py SemgHandSubjectCh2 UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py PLAID UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py UWaveGestureLibraryAll UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py ElectricDevices UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py EthanolLevel 
UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py StarLightCurves UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py NonInvasiveFetalECGThorax1 UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py NonInvasiveFetalECGThorax2 UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py FordA UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py FordB UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py HandOutlines UCR --loader UCR --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\n"
  },
  {
    "path": "ts_forecasting_methods/ts2vec/scripts/uea.sh",
    "content": "python -u train.py ERing UEA --loader UEA --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py Libras UEA --loader UEA --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py AtrialFibrillation UEA --loader UEA --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py BasicMotions UEA --loader UEA --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py RacketSports UEA --loader UEA --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py Handwriting UEA --loader UEA --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py Epilepsy UEA --loader UEA --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py JapaneseVowels UEA --loader UEA --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py UWaveGestureLibrary UEA --loader UEA --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py PenDigits UEA --loader UEA --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py StandWalkJump UEA --loader UEA --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py NATOPS UEA --loader UEA --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py ArticularyWordRecognition UEA --loader UEA --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py FingerMovements UEA --loader UEA --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py LSST UEA --loader UEA --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py HandMovementDirection UEA --loader UEA --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py Cricket UEA --loader UEA --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py CharacterTrajectories UEA 
--loader UEA --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py EthanolConcentration UEA --loader UEA --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py SelfRegulationSCP1 UEA --loader UEA --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py SelfRegulationSCP2 UEA --loader UEA --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py Heartbeat UEA --loader UEA --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py PhonemeSpectra UEA --loader UEA --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py SpokenArabicDigits UEA --loader UEA --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py EigenWorms UEA --loader UEA --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py DuckDuckGeese UEA --loader UEA --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py PEMS-SF UEA --loader UEA --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py FaceDetection UEA --loader UEA --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py MotorImagery UEA --loader UEA --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\npython -u train.py InsectWingbeat UEA --loader UEA --batch-size 8 --repr-dims 320 --max-threads 8 --seed 42 --eval\n"
  },
  {
    "path": "ts_forecasting_methods/ts2vec/scripts/yahoo.sh",
    "content": "python -u train.py yahoo anomaly_0 --loader anomaly --repr-dims 320 --max-threads 8 --seed 1 --eval\npython -u train.py yahoo anomaly_1 --loader anomaly --repr-dims 320 --max-threads 8 --seed 2 --eval\npython -u train.py yahoo anomaly_2 --loader anomaly --repr-dims 320 --max-threads 8 --seed 3 --eval\n\npython -u train.py yahoo anomaly_coldstart_0 --loader anomaly_coldstart --repr-dims 320 --max-threads 8 --seed 1 --eval\npython -u train.py yahoo anomaly_coldstart_1 --loader anomaly_coldstart --repr-dims 320 --max-threads 8 --seed 2 --eval\npython -u train.py yahoo anomaly_coldstart_2 --loader anomaly_coldstart --repr-dims 320 --max-threads 8 --seed 3 --eval\n"
  },
  {
    "path": "ts_forecasting_methods/ts2vec/tasks/__init__.py",
    "content": "from .classification import eval_classification\nfrom .forecasting import eval_forecasting\nfrom .anomaly_detection import eval_anomaly_detection, eval_anomaly_detection_coldstart\n"
  },
  {
    "path": "ts_forecasting_methods/ts2vec/tasks/_eval_protocols.py",
    "content": "import numpy as np\nfrom sklearn.linear_model import Ridge\nfrom sklearn.svm import SVC\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.model_selection import GridSearchCV, train_test_split\n\ndef fit_svm(features, y, MAX_SAMPLES=10000):\n    nb_classes = np.unique(y, return_counts=True)[1].shape[0]\n    train_size = features.shape[0]\n\n    svm = SVC(C=np.inf, gamma='scale')\n    if train_size // nb_classes < 5 or train_size < 50:\n        return svm.fit(features, y)\n    else:\n        grid_search = GridSearchCV(\n            svm, {\n                'C': [\n                    0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000,\n                    np.inf\n                ],\n                'kernel': ['rbf'],\n                'degree': [3],\n                'gamma': ['scale'],\n                'coef0': [0],\n                'shrinking': [True],\n                'probability': [False],\n                'tol': [0.001],\n                'cache_size': [200],\n                'class_weight': [None],\n                'verbose': [False],\n                'max_iter': [10000000],\n                'decision_function_shape': ['ovr'],\n                'random_state': [None]\n            },\n            cv=5, n_jobs=5\n        )\n        # If the training set is too large, subsample MAX_SAMPLES examples\n        if train_size > MAX_SAMPLES:\n            split = train_test_split(\n                features, y,\n                train_size=MAX_SAMPLES, random_state=0, stratify=y\n            )\n            features = split[0]\n            y = split[2]\n            \n        grid_search.fit(features, y)\n        return grid_search.best_estimator_\n\ndef fit_lr(features, y, MAX_SAMPLES=100000):\n    # If the training set is too large, subsample MAX_SAMPLES examples\n    if features.shape[0] > 
MAX_SAMPLES:\n        split = train_test_split(\n            features, y,\n            train_size=MAX_SAMPLES, random_state=0, stratify=y\n        )\n        features = split[0]\n        y = split[2]\n        \n    pipe = make_pipeline(\n        StandardScaler(),\n        LogisticRegression(\n            random_state=0,\n            max_iter=1000000,\n            multi_class='ovr'\n        )\n    )\n    pipe.fit(features, y)\n    return pipe\n\ndef fit_knn(features, y):\n    pipe = make_pipeline(\n        StandardScaler(),\n        KNeighborsClassifier(n_neighbors=1)\n    )\n    pipe.fit(features, y)\n    return pipe\n\ndef fit_ridge(train_features, train_y, valid_features, valid_y, MAX_SAMPLES=100000):\n    # If the training set is too large, subsample MAX_SAMPLES examples\n    if train_features.shape[0] > MAX_SAMPLES:\n        split = train_test_split(\n            train_features, train_y,\n            train_size=MAX_SAMPLES, random_state=0\n        )\n        train_features = split[0]\n        train_y = split[2]\n    if valid_features.shape[0] > MAX_SAMPLES:\n        split = train_test_split(\n            valid_features, valid_y,\n            train_size=MAX_SAMPLES, random_state=0\n        )\n        valid_features = split[0]\n        valid_y = split[2]\n    \n    alphas = [0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50, 100, 200, 500, 1000]\n    valid_results = []\n    for alpha in alphas:\n        lr = Ridge(alpha=alpha).fit(train_features, train_y)\n        valid_pred = lr.predict(valid_features)\n        score = np.sqrt(((valid_pred - valid_y) ** 2).mean()) + np.abs(valid_pred - valid_y).mean()\n        valid_results.append(score)\n    best_alpha = alphas[np.argmin(valid_results)]\n    \n    lr = Ridge(alpha=best_alpha)\n    lr.fit(train_features, train_y)\n    return lr\n"
  },
  {
    "path": "ts_forecasting_methods/ts2vec/tasks/anomaly_detection.py",
    "content": "import numpy as np\nimport time\nfrom sklearn.metrics import f1_score, precision_score, recall_score\nimport bottleneck as bn\n\n# consider delay threshold and missing segments\ndef get_range_proba(predict, label, delay=7):\n    splits = np.where(label[1:] != label[:-1])[0] + 1\n    is_anomaly = label[0] == 1\n    new_predict = np.array(predict)\n    pos = 0\n\n    for sp in splits:\n        if is_anomaly:\n            if 1 in predict[pos:min(pos + delay + 1, sp)]:\n                new_predict[pos: sp] = 1\n            else:\n                new_predict[pos: sp] = 0\n        is_anomaly = not is_anomaly\n        pos = sp\n    sp = len(label)\n\n    if is_anomaly:  # anomaly in the end\n        if 1 in predict[pos: min(pos + delay + 1, sp)]:\n            new_predict[pos: sp] = 1\n        else:\n            new_predict[pos: sp] = 0\n\n    return new_predict\n\n\n# set missing = 0\ndef reconstruct_label(timestamp, label):\n    timestamp = np.asarray(timestamp, np.int64)\n    index = np.argsort(timestamp)\n\n    timestamp_sorted = np.asarray(timestamp[index])\n    interval = np.min(np.diff(timestamp_sorted))\n\n    label = np.asarray(label, np.int64)\n    label = np.asarray(label[index])\n\n    idx = (timestamp_sorted - timestamp_sorted[0]) // interval\n\n    new_label = np.zeros(shape=((timestamp_sorted[-1] - timestamp_sorted[0]) // interval + 1,), dtype=np.int)\n    new_label[idx] = label\n\n    return new_label\n\n\ndef eval_ad_result(test_pred_list, test_labels_list, test_timestamps_list, delay):\n    labels = []\n    pred = []\n    for test_pred, test_labels, test_timestamps in zip(test_pred_list, test_labels_list, test_timestamps_list):\n        assert test_pred.shape == test_labels.shape == test_timestamps.shape\n        test_labels = reconstruct_label(test_timestamps, test_labels)\n        test_pred = reconstruct_label(test_timestamps, test_pred)\n        test_pred = get_range_proba(test_pred, test_labels, delay)\n        
labels.append(test_labels)\n        pred.append(test_pred)\n    labels = np.concatenate(labels)\n    pred = np.concatenate(pred)\n    return {\n        'f1': f1_score(labels, pred),\n        'precision': precision_score(labels, pred),\n        'recall': recall_score(labels, pred)\n    }\n\n\ndef np_shift(arr, num, fill_value=np.nan):\n    result = np.empty_like(arr)\n    if num > 0:\n        result[:num] = fill_value\n        result[num:] = arr[:-num]\n    elif num < 0:\n        result[num:] = fill_value\n        result[:num] = arr[-num:]\n    else:\n        result[:] = arr\n    return result\n\n\ndef eval_anomaly_detection(model, all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay):\n    t = time.time()\n    \n    all_train_repr = {}\n    all_test_repr = {}\n    all_train_repr_wom = {}\n    all_test_repr_wom = {}\n    for k in all_train_data:\n        train_data = all_train_data[k]\n        test_data = all_test_data[k]\n\n        full_repr = model.encode(\n            np.concatenate([train_data, test_data]).reshape(1, -1, 1),\n            mask='mask_last',\n            casual=True,\n            sliding_length=1,\n            sliding_padding=200,\n            batch_size=256\n        ).squeeze()\n        all_train_repr[k] = full_repr[:len(train_data)]\n        all_test_repr[k] = full_repr[len(train_data):]\n\n        full_repr_wom = model.encode(\n            np.concatenate([train_data, test_data]).reshape(1, -1, 1),\n            casual=True,\n            sliding_length=1,\n            sliding_padding=200,\n            batch_size=256\n        ).squeeze()\n        all_train_repr_wom[k] = full_repr_wom[:len(train_data)]\n        all_test_repr_wom[k] = full_repr_wom[len(train_data):]\n        \n    res_log = []\n    labels_log = []\n    timestamps_log = []\n    for k in all_train_data:\n        train_data = all_train_data[k]\n        train_labels = all_train_labels[k]\n        train_timestamps = 
all_train_timestamps[k]\n\n        test_data = all_test_data[k]\n        test_labels = all_test_labels[k]\n        test_timestamps = all_test_timestamps[k]\n\n        train_err = np.abs(all_train_repr_wom[k] - all_train_repr[k]).sum(axis=1)\n        test_err = np.abs(all_test_repr_wom[k] - all_test_repr[k]).sum(axis=1)\n\n        ma = np_shift(bn.move_mean(np.concatenate([train_err, test_err]), 21), 1)\n        train_err_adj = (train_err - ma[:len(train_err)]) / ma[:len(train_err)]\n        test_err_adj = (test_err - ma[len(train_err):]) / ma[len(train_err):]\n        train_err_adj = train_err_adj[22:]\n\n        thr = np.mean(train_err_adj) + 4 * np.std(train_err_adj)\n        test_res = (test_err_adj > thr) * 1\n\n        for i in range(len(test_res)):\n            if i >= delay and test_res[i-delay:i].sum() >= 1:\n                test_res[i] = 0\n\n        res_log.append(test_res)\n        labels_log.append(test_labels)\n        timestamps_log.append(test_timestamps)\n    t = time.time() - t\n    \n    eval_res = eval_ad_result(res_log, labels_log, timestamps_log, delay)\n    eval_res['infer_time'] = t\n    return res_log, eval_res\n\n\ndef eval_anomaly_detection_coldstart(model, all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay):\n    t = time.time()\n    \n    all_data = {}\n    all_repr = {}\n    all_repr_wom = {}\n    for k in all_train_data:\n        all_data[k] = np.concatenate([all_train_data[k], all_test_data[k]])\n        all_repr[k] = model.encode(\n            all_data[k].reshape(1, -1, 1),\n            mask='mask_last',\n            casual=True,\n            sliding_length=1,\n            sliding_padding=200,\n            batch_size=256\n        ).squeeze()\n        all_repr_wom[k] = model.encode(\n            all_data[k].reshape(1, -1, 1),\n            casual=True,\n            sliding_length=1,\n            sliding_padding=200,\n            batch_size=256\n        ).squeeze()\n        
\n    res_log = []\n    labels_log = []\n    timestamps_log = []\n    for k in all_data:\n        data = all_data[k]\n        labels = np.concatenate([all_train_labels[k], all_test_labels[k]])\n        timestamps = np.concatenate([all_train_timestamps[k], all_test_timestamps[k]])\n        \n        err = np.abs(all_repr_wom[k] - all_repr[k]).sum(axis=1)\n        ma = np_shift(bn.move_mean(err, 21), 1)\n        err_adj = (err - ma) / ma\n        \n        MIN_WINDOW = len(data) // 10\n        thr = bn.move_mean(err_adj, len(err_adj), MIN_WINDOW) + 4 * bn.move_std(err_adj, len(err_adj), MIN_WINDOW)\n        res = (err_adj > thr) * 1\n        \n        for i in range(len(res)):\n            if i >= delay and res[i-delay:i].sum() >= 1:\n                res[i] = 0\n\n        res_log.append(res[MIN_WINDOW:])\n        labels_log.append(labels[MIN_WINDOW:])\n        timestamps_log.append(timestamps[MIN_WINDOW:])\n    t = time.time() - t\n    \n    eval_res = eval_ad_result(res_log, labels_log, timestamps_log, delay)\n    eval_res['infer_time'] = t\n    return res_log, eval_res\n\n"
  },
  {
    "path": "ts_forecasting_methods/ts2vec/tasks/classification.py",
    "content": "import numpy as np\nfrom . import _eval_protocols as eval_protocols\nfrom sklearn.preprocessing import label_binarize\nfrom sklearn.metrics import average_precision_score\n\ndef eval_classification(model, train_data, train_labels, test_data, test_labels, eval_protocol='linear'):\n    assert train_labels.ndim == 1 or train_labels.ndim == 2\n    train_repr = model.encode(train_data, encoding_window='full_series' if train_labels.ndim == 1 else None)\n    test_repr = model.encode(test_data, encoding_window='full_series' if train_labels.ndim == 1 else None)\n\n    if eval_protocol == 'linear':\n        fit_clf = eval_protocols.fit_lr\n    elif eval_protocol == 'svm':\n        fit_clf = eval_protocols.fit_svm\n    elif eval_protocol == 'knn':\n        fit_clf = eval_protocols.fit_knn\n    else:\n        assert False, 'unknown evaluation protocol'\n\n    def merge_dim01(array):\n        return array.reshape(array.shape[0]*array.shape[1], *array.shape[2:])\n\n    if train_labels.ndim == 2:\n        train_repr = merge_dim01(train_repr)\n        train_labels = merge_dim01(train_labels)\n        test_repr = merge_dim01(test_repr)\n        test_labels = merge_dim01(test_labels)\n\n    clf = fit_clf(train_repr, train_labels)\n\n    acc = clf.score(test_repr, test_labels)\n    if eval_protocol == 'linear':\n        y_score = clf.predict_proba(test_repr)\n    else:\n        y_score = clf.decision_function(test_repr)\n    test_labels_onehot = label_binarize(test_labels, classes=np.arange(train_labels.max()+1))\n    auprc = average_precision_score(test_labels_onehot, y_score)\n    \n    return y_score, { 'acc': acc, 'auprc': auprc }\n"
  },
  {
    "path": "ts_forecasting_methods/ts2vec/tasks/forecasting.py",
    "content": "import numpy as np\nimport time\nfrom . import _eval_protocols as eval_protocols\n\ndef generate_pred_samples(features, data, pred_len, drop=0):\n    n = data.shape[1]\n    features = features[:, :-pred_len]\n    labels = np.stack([ data[:, i:1+n+i-pred_len] for i in range(pred_len)], axis=2)[:, 1:]\n    features = features[:, drop:]\n    labels = labels[:, drop:]\n    return features.reshape(-1, features.shape[-1]), \\\n            labels.reshape(-1, labels.shape[2]*labels.shape[3])\n\ndef cal_metrics(pred, target):\n    return {\n        'MSE': ((pred - target) ** 2).mean(),\n        'MAE': np.abs(pred - target).mean()\n    }\n    \ndef eval_forecasting(model, data, train_slice, valid_slice, test_slice, scaler, pred_lens, n_covariate_cols):\n    padding = 200\n    \n    t = time.time()\n    all_repr = model.encode(\n        data,\n        casual=True,\n        sliding_length=1,\n        sliding_padding=padding,\n        batch_size=256\n    )\n    ts2vec_infer_time = time.time() - t\n\n    print(\"all_repr.shape = \", all_repr.shape)\n    \n    train_repr = all_repr[:, train_slice]\n    valid_repr = all_repr[:, valid_slice]\n    test_repr = all_repr[:, test_slice]\n    print(\"train_repr.shape = \", train_repr.shape, \", valid_repr.shape = \", valid_repr.shape, \", test_repr.shape = \", test_repr.shape)\n\n    print(\"n_covariate_cols = \", n_covariate_cols)\n    \n    train_data = data[:, train_slice, n_covariate_cols:]\n    valid_data = data[:, valid_slice, n_covariate_cols:]\n    test_data = data[:, test_slice, n_covariate_cols:]\n\n    print(\"train_data.shape = \", train_data.shape, \", valid_data.shape = \", valid_data.shape, \", test_data.shape = \",\n          test_data.shape)\n\n    ours_result = {}\n    lr_train_time = {}\n    lr_infer_time = {}\n    out_log = {}\n    for pred_len in pred_lens:\n        train_features, train_labels = generate_pred_samples(train_repr, train_data, pred_len, drop=padding)\n        valid_features, 
valid_labels = generate_pred_samples(valid_repr, valid_data, pred_len)\n        test_features, test_labels = generate_pred_samples(test_repr, test_data, pred_len)\n        \n        t = time.time()\n        lr = eval_protocols.fit_ridge(train_features, train_labels, valid_features, valid_labels)\n        lr_train_time[pred_len] = time.time() - t\n        \n        t = time.time()\n        test_pred = lr.predict(test_features)\n        lr_infer_time[pred_len] = time.time() - t\n\n        ori_shape = test_data.shape[0], -1, pred_len, test_data.shape[2]\n        test_pred = test_pred.reshape(ori_shape)\n        test_labels = test_labels.reshape(ori_shape)\n        \n        # if test_data.shape[0] > 1:\n        #     test_pred_inv = scaler.inverse_transform(test_pred.swapaxes(0, 3)).swapaxes(0, 3)\n        #     test_labels_inv = scaler.inverse_transform(test_labels.swapaxes(0, 3)).swapaxes(0, 3)\n        # else:\n        #     print(\"test_pred.shape = \", test_pred.shape, \", test_labels.shape = \", test_labels.shape)\n        #     print(\"test_pred.swapaxes(0, 3).shape = \", test_pred.swapaxes(0, 3).shape)\n        #     test_pred_inv = scaler.inverse_transform(test_pred)\n        #     test_labels_inv = scaler.inverse_transform(test_labels)\n            \n        out_log[pred_len] = {\n            'norm': test_pred,\n            # 'raw': test_pred_inv,\n            'norm_gt': test_labels,\n            # 'raw_gt': test_labels_inv\n        }\n        ours_result[pred_len] = {\n            'norm': cal_metrics(test_pred, test_labels),\n            # 'raw': cal_metrics(test_pred_inv, test_labels_inv)\n        }\n        \n    eval_res = {\n        'ours': ours_result,\n        'ts2vec_infer_time': ts2vec_infer_time,\n        'lr_train_time': lr_train_time,\n        'lr_infer_time': lr_infer_time\n    }\n    return out_log, eval_res\n\n\ndef eval_forecasting_new(model, train_data, valid_data, test_data, pred_lens):\n    padding = 200\n\n    t = time.time()\n    
train_repr = model.encode(\n        train_data,\n        casual=True,\n        sliding_length=1,\n        sliding_padding=padding,\n        batch_size=256\n    )\n    valid_repr = model.encode(\n        valid_data,\n        casual=True,\n        sliding_length=1,\n        sliding_padding=padding,\n        batch_size=256\n    )\n    test_repr = model.encode(\n        test_data,\n        casual=True,\n        sliding_length=1,\n        sliding_padding=padding,\n        batch_size=256\n    )\n    ts2vec_infer_time = time.time() - t\n\n    print(\"train_data.shape = \", train_data.shape)\n\n    # train_repr = all_repr[:, train_slice]\n    # valid_repr = all_repr[:, valid_slice]\n    # test_repr = all_repr[:, test_slice]\n    # print(\"train_repr.shape = \", train_repr.shape, \", valid_repr.shape = \", valid_repr.shape, \", test_repr.shape = \",\n    #       test_repr.shape)\n    #\n    # print(\"n_covariate_cols = \", n_covariate_cols)\n    #\n    # train_data = data[:, train_slice, n_covariate_cols:]\n    # valid_data = data[:, valid_slice, n_covariate_cols:]\n    # test_data = data[:, test_slice, n_covariate_cols:]\n\n    print(\"train_data.shape = \", train_data.shape, \", valid_data.shape = \", valid_data.shape, \", test_data.shape = \",\n          test_data.shape)\n\n    ours_result = {}\n    lr_train_time = {}\n    lr_infer_time = {}\n    out_log = {}\n    for pred_len in pred_lens:\n        train_features, train_labels = generate_pred_samples(train_repr, train_data, pred_len, drop=padding)\n        valid_features, valid_labels = generate_pred_samples(valid_repr, valid_data, pred_len)\n        test_features, test_labels = generate_pred_samples(test_repr, test_data, pred_len)\n\n        t = time.time()\n        lr = eval_protocols.fit_ridge(train_features, train_labels, valid_features, valid_labels)\n        lr_train_time[pred_len] = time.time() - t\n\n        t = time.time()\n        test_pred = lr.predict(test_features)\n        lr_infer_time[pred_len] = 
time.time() - t\n\n        ori_shape = test_data.shape[0], -1, pred_len, test_data.shape[2]\n        test_pred = test_pred.reshape(ori_shape)\n        test_labels = test_labels.reshape(ori_shape)\n\n        # if test_data.shape[0] > 1:\n        #     test_pred_inv = scaler.inverse_transform(test_pred.swapaxes(0, 3)).swapaxes(0, 3)\n        #     test_labels_inv = scaler.inverse_transform(test_labels.swapaxes(0, 3)).swapaxes(0, 3)\n        # else:\n        #     print(\"test_pred.shape = \", test_pred.shape, \", test_labels.shape = \", test_labels.shape)\n        #     print(\"test_pred.swapaxes(0, 3).shape = \", test_pred.swapaxes(0, 3).shape)\n        #     test_pred_inv = scaler.inverse_transform(test_pred)\n        #     test_labels_inv = scaler.inverse_transform(test_labels)\n\n        out_log[pred_len] = {\n            'norm': test_pred,\n            # 'raw': test_pred_inv,\n            'norm_gt': test_labels,\n            # 'raw_gt': test_labels_inv\n        }\n        ours_result[pred_len] = {\n            'norm': cal_metrics(test_pred, test_labels),\n            # 'raw': cal_metrics(test_pred_inv, test_labels_inv)\n        }\n\n    eval_res = {\n        'ours': ours_result,\n        'ts2vec_infer_time': ts2vec_infer_time,\n        'lr_train_time': lr_train_time,\n        'lr_infer_time': lr_infer_time\n    }\n    return out_log, eval_res\n"
  },
  {
    "path": "ts_forecasting_methods/ts2vec/train.py",
    "content": "import torch\nimport numpy as np\nimport argparse\nimport os\nimport sys\nimport time\nimport datetime\nfrom ts2vec import TS2Vec\nimport tasks\nimport datautils\nfrom utils import init_dl_program, name_with_datetime, pkl_save, data_dropout\n\ndef save_checkpoint_callback(\n    save_every=1,\n    unit='epoch'\n):\n    assert unit in ('epoch', 'iter')\n    def callback(model, loss):\n        n = model.n_epochs if unit == 'epoch' else model.n_iters\n        if n % save_every == 0:\n            model.save(f'{run_dir}/model_{n}.pkl')\n    return callback\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    # parser.add_argument('dataset', help='The dataset name')\n    parser.add_argument('--dataset', default='ETTh1', help='The dataset name')  ## 'ETTh1', 'ETTh2', 'electricity'  ETTm1\n    # parser.add_argument('run_name', help='The folder name used to save model, output and evaluation metrics. This can be set to any word')\n    parser.add_argument('--run_name', default='ts2Vec',\n                        help='The folder name used to save model, output and evaluation metrics. This can be set to any word')\n    # parser.add_argument('--loader', type=str, required=True, help='The data loader used to load the experimental data. This can be set to UCR, '\n    #                                                               'UEA, forecast_csv, forecast_csv_univar, anomaly, or anomaly_coldstart')\n    parser.add_argument('--loader', type=str, default='forecast_csv',\n                        help='The data loader used to load the experimental data.')  ## forecast_csv forecast_csv_univar\n    parser.add_argument('--gpu', type=int, default=0, help='The gpu no. 
used for training and inference (defaults to 0)')\n    parser.add_argument('--batch-size', type=int, default=8, help='The batch size (defaults to 8)')\n    parser.add_argument('--lr', type=float, default=0.001, help='The learning rate (defaults to 0.001)')\n    parser.add_argument('--repr-dims', type=int, default=320, help='The representation dimension (defaults to 320)')\n    parser.add_argument('--max-train-length', type=int, default=3000, help='For sequence with a length greater than <max_train_length>, it would be cropped into some sequences, each of which has a length less than <max_train_length> (defaults to 3000)')\n    parser.add_argument('--iters', type=int, default=None, help='The number of iterations')\n    parser.add_argument('--epochs', type=int, default=None, help='The number of epochs')\n    parser.add_argument('--save-every', type=int, default=None, help='Save the checkpoint every <save_every> iterations/epochs')\n    parser.add_argument('--seed', type=int, default=None, help='The random seed')\n    parser.add_argument('--max-threads', type=int, default=6, help='The maximum allowed number of threads used by this process')\n    # parser.add_argument('--eval', action=\"store_true\", help='Whether to perform evaluation after training')\n    parser.add_argument('--eval', default=True,\n                        help='Whether to perform evaluation after training')  ## action=\"store_true\"\n    parser.add_argument('--irregular', type=float, default=0, help='The ratio of missing observations (defaults to 0)')\n\n    parser.add_argument('--save_dir', type=str, default='/dev_data/lz/ts_forecasting_methods/result/')\n    parser.add_argument('--save_csv_name', type=str, default='ts2vec_forecasting_0724.csv')\n\n    args = parser.parse_args()\n    \n    print(\"Dataset:\", args.dataset)\n    print(\"Arguments:\", str(args))\n\n    # 检查路径是否存在，如果不存在则赋值为新的路径\n    if not os.path.exists(args.save_dir):\n        args.save_dir = 
'/SSD/lz/ts_forecasting_methods/result/'\n\n    print(\"save_dir = \", args.save_dir)  # 输出检查\n    \n    device = init_dl_program(args.gpu, seed=args.seed, max_threads=args.max_threads)\n    \n    print('Loading data... ', end='')\n    if args.loader == 'UCR':\n        task_type = 'classification'\n        train_data, train_labels, test_data, test_labels = datautils.load_UCR(args.dataset)\n        \n    elif args.loader == 'UEA':\n        task_type = 'classification'\n        train_data, train_labels, test_data, test_labels = datautils.load_UEA(args.dataset)\n        \n    elif args.loader == 'forecast_csv':\n        task_type = 'forecasting'\n        data, train_slice, valid_slice, test_slice, scaler, pred_lens, n_covariate_cols = datautils.load_forecast_csv(args.dataset)\n        train_data = data[:, train_slice]\n        \n    elif args.loader == 'forecast_csv_univar':\n        task_type = 'forecasting'\n        data, train_slice, valid_slice, test_slice, scaler, pred_lens, n_covariate_cols = datautils.load_forecast_csv(args.dataset, univar=True)\n        train_data = data[:, train_slice]\n        print(\"raw data.shape = \", data.shape, \", train_data.shape = \", train_data.shape)\n        \n    elif args.loader == 'forecast_npy':\n        task_type = 'forecasting'\n        data, train_slice, valid_slice, test_slice, scaler, pred_lens, n_covariate_cols = datautils.load_forecast_npy(args.dataset)\n        train_data = data[:, train_slice]\n        \n    elif args.loader == 'forecast_npy_univar':\n        task_type = 'forecasting'\n        data, train_slice, valid_slice, test_slice, scaler, pred_lens, n_covariate_cols = datautils.load_forecast_npy(args.dataset, univar=True)\n        train_data = data[:, train_slice]\n        \n    elif args.loader == 'anomaly':\n        task_type = 'anomaly_detection'\n        all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay = datautils.load_anomaly(args.dataset)\n 
       train_data = datautils.gen_ano_train_data(all_train_data)\n        \n    elif args.loader == 'anomaly_coldstart':\n        task_type = 'anomaly_detection_coldstart'\n        all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay = datautils.load_anomaly(args.dataset)\n        train_data, _, _, _ = datautils.load_UCR('FordA')\n        \n    else:\n        raise ValueError(f\"Unknown loader {args.loader}.\")\n        \n        \n    if args.irregular > 0:\n        if task_type == 'classification':\n            train_data = data_dropout(train_data, args.irregular)\n            test_data = data_dropout(test_data, args.irregular)\n        else:\n            raise ValueError(f\"Task type {task_type} is not supported when irregular>0.\")\n    print('done')\n    print(\"output_dims=args.repr_dims = \", args.repr_dims, \", input_dims = \", train_data.shape[-1])\n    config = dict(\n        batch_size=args.batch_size,\n        lr=args.lr,\n        output_dims=args.repr_dims,\n        max_train_length=args.max_train_length\n    )\n    \n    if args.save_every is not None:\n        unit = 'epoch' if args.epochs is not None else 'iter'\n        config[f'after_{unit}_callback'] = save_checkpoint_callback(args.save_every, unit)\n\n    run_dir = 'training/' + args.dataset + '__' + name_with_datetime(args.run_name)\n    os.makedirs(run_dir, exist_ok=True)\n    \n    t = time.time()\n    \n    model = TS2Vec(\n        input_dims=train_data.shape[-1],\n        device=device,\n        **config\n    )\n    loss_log = model.fit(\n        train_data,\n        n_epochs=args.epochs,\n        n_iters=args.iters,\n        verbose=True\n    )\n    model.save(f'{run_dir}/model.pkl')\n\n    t = time.time() - t\n    print(f\"\\nTraining time: {datetime.timedelta(seconds=t)}\\n\")\n\n    if args.eval:\n        if task_type == 'classification':\n            out, eval_res = tasks.eval_classification(model, train_data, train_labels, 
test_data, test_labels, eval_protocol='svm')\n        elif task_type == 'forecasting':\n            print(\"\")\n            out, eval_res = tasks.eval_forecasting(model, data, train_slice, valid_slice, test_slice, scaler, pred_lens, n_covariate_cols)\n\n            # print(\"ts2vec out = \", out)\n            print(\"ts2vec eval_res = \", eval_res)\n\n            end_result = {}\n            end_result['dataset'] = args.dataset\n            end_result['24_MSE'] = eval_res['ours'][24]['norm']['MSE']\n            end_result['24_MAE'] = eval_res['ours'][24]['norm']['MAE']\n\n            end_result['48_MSE'] = eval_res['ours'][48]['norm']['MSE']\n            end_result['48_MAE'] = eval_res['ours'][48]['norm']['MAE']\n\n            if args.dataset == 'ETTm1':\n                end_result['168_MSE'] = eval_res['ours'][96]['norm']['MSE']\n                end_result['168_MAE'] = eval_res['ours'][96]['norm']['MAE']\n\n                end_result['336_MSE'] = eval_res['ours'][288]['norm']['MSE']\n                end_result['336_MAE'] = eval_res['ours'][288]['norm']['MAE']\n\n                end_result['720_MSE'] = eval_res['ours'][672]['norm']['MSE']\n                end_result['720_MAE'] = eval_res['ours'][672]['norm']['MAE']\n            else:\n\n                end_result['168_MSE'] = eval_res['ours'][168]['norm']['MSE']\n                end_result['168_MAE'] = eval_res['ours'][168]['norm']['MAE']\n\n                end_result['336_MSE'] = eval_res['ours'][336]['norm']['MSE']\n                end_result['336_MAE'] = eval_res['ours'][336]['norm']['MAE']\n\n                end_result['720_MSE'] = eval_res['ours'][720]['norm']['MSE']\n                end_result['720_MAE'] = eval_res['ours'][720]['norm']['MAE']\n\n            import pandas as pd\n\n            # 转换字典为 DataFrame\n            df = pd.DataFrame([eval_res])\n            # 指定保存路径\n            save_path = args.save_dir + args.save_csv_name\n\n            # 转换字典为 DataFrame\n            df_new = 
pd.DataFrame([end_result])\n\n            # 检查文件是否存在\n            if os.path.exists(save_path):\n                # 文件存在，读取现有数据\n                df_existing = pd.read_csv(save_path, index_col=0)\n                # 将新数据附加到现有数据框中\n                df_combined = pd.concat([df_existing, df_new], ignore_index=True)\n            else:\n                # 文件不存在，创建新的数据框\n                df_combined = df_new\n\n            # 保存 DataFrame 为 CSV 文件\n            df_combined.to_csv(save_path, index=True, index_label=\"id\")\n\n            print(\"Save success!!!\")\n\n\n\n\n\n        elif task_type == 'anomaly_detection':\n            out, eval_res = tasks.eval_anomaly_detection(model, all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay)\n        elif task_type == 'anomaly_detection_coldstart':\n            out, eval_res = tasks.eval_anomaly_detection_coldstart(model, all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay)\n        else:\n            assert False\n        pkl_save(f'{run_dir}/out.pkl', out)\n        pkl_save(f'{run_dir}/eval_res.pkl', eval_res)\n        print('Evaluation result:', eval_res)\n\n    print(\"Finished.\")\n"
  },
  {
    "path": "ts_forecasting_methods/ts2vec/ts2vec.py",
    "content": "import torch\nimport torch.nn.functional as F\nfrom torch.utils.data import TensorDataset, DataLoader\nimport numpy as np\nfrom ts2vec.models import TSEncoder\nfrom ts2vec.models.losses import hierarchical_contrastive_loss\nfrom ts2vec.utils import take_per_row, split_with_nan, centerize_vary_length_series, torch_pad_nan\nimport math\n\nclass TS2Vec:\n    '''The TS2Vec model'''\n    \n    def __init__(\n        self,\n        input_dims,\n        output_dims=320,\n        hidden_dims=64,\n        depth=10,\n        device='cuda',\n        lr=0.001,\n        batch_size=16,\n        max_train_length=None,\n        temporal_unit=0,\n        after_iter_callback=None,\n        after_epoch_callback=None\n    ):\n        ''' Initialize a TS2Vec model.\n        \n        Args:\n            input_dims (int): The input dimension. For a univariate time series, this should be set to 1.\n            output_dims (int): The representation dimension.\n            hidden_dims (int): The hidden dimension of the encoder.\n            depth (int): The number of hidden residual blocks in the encoder.\n            device (int): The gpu used for training and inference.\n            lr (int): The learning rate.\n            batch_size (int): The batch size.\n            max_train_length (Union[int, NoneType]): The maximum allowed sequence length for training. For sequence with a length greater than <max_train_length>, it would be cropped into some sequences, each of which has a length less than <max_train_length>.\n            temporal_unit (int): The minimum unit to perform temporal contrast. 
When training on a very long sequence, this param helps to reduce the cost of time and memory.\n            after_iter_callback (Union[Callable, NoneType]): A callback function that would be called after each iteration.\n            after_epoch_callback (Union[Callable, NoneType]): A callback function that would be called after each epoch.\n        '''\n        \n        super().__init__()\n        self.device = device\n        self.lr = lr\n        self.batch_size = batch_size\n        self.max_train_length = max_train_length\n        self.temporal_unit = temporal_unit\n        \n        self._net = TSEncoder(input_dims=input_dims, output_dims=output_dims, hidden_dims=hidden_dims, depth=depth).to(self.device)\n        self.net = torch.optim.swa_utils.AveragedModel(self._net)\n        self.net.update_parameters(self._net)\n        \n        self.after_iter_callback = after_iter_callback\n        self.after_epoch_callback = after_epoch_callback\n        \n        self.n_epochs = 0\n        self.n_iters = 0\n    \n    def fit(self, train_data, n_epochs=None, n_iters=None, verbose=False):\n        ''' Training the TS2Vec model.\n        \n        Args:\n            train_data (numpy.ndarray): The training data. It should have a shape of (n_instance, n_timestamps, n_features). All missing data should be set to NaN.\n            n_epochs (Union[int, NoneType]): The number of epochs. When this reaches, the training stops.\n            n_iters (Union[int, NoneType]): The number of iterations. When this reaches, the training stops. 
If both n_epochs and n_iters are not specified, a default setting would be used that sets n_iters to 200 for a dataset with size <= 100000, 600 otherwise.\n            verbose (bool): Whether to print the training loss after each epoch.\n            \n        Returns:\n            loss_log: a list containing the training losses on each epoch.\n        '''\n        assert train_data.ndim == 3\n        \n        if n_iters is None and n_epochs is None:\n            n_iters = 200 if train_data.size <= 100000 else 600  # default param for n_iters\n        \n        if self.max_train_length is not None:\n            sections = train_data.shape[1] // self.max_train_length\n            if sections >= 2:\n                train_data = np.concatenate(split_with_nan(train_data, sections, axis=1), axis=0)\n\n        temporal_missing = np.isnan(train_data).all(axis=-1).any(axis=0)\n        if temporal_missing[0] or temporal_missing[-1]:\n            train_data = centerize_vary_length_series(train_data)\n                \n        train_data = train_data[~np.isnan(train_data).all(axis=2).all(axis=1)]\n        \n        train_dataset = TensorDataset(torch.from_numpy(train_data).to(torch.float))\n        train_loader = DataLoader(train_dataset, batch_size=min(self.batch_size, len(train_dataset)), shuffle=True, drop_last=True)\n        \n        optimizer = torch.optim.AdamW(self._net.parameters(), lr=self.lr)\n        \n        loss_log = []\n        \n        while True:\n            if n_epochs is not None and self.n_epochs >= n_epochs:\n                break\n            \n            cum_loss = 0\n            n_epoch_iters = 0\n            \n            interrupted = False\n            for batch in train_loader:\n                if n_iters is not None and self.n_iters >= n_iters:\n                    interrupted = True\n                    break\n                \n                x = batch[0]\n                if self.max_train_length is not None and x.size(1) > 
self.max_train_length:\n                    window_offset = np.random.randint(x.size(1) - self.max_train_length + 1)\n                    x = x[:, window_offset : window_offset + self.max_train_length]\n                x = x.to(self.device)\n                \n                ts_l = x.size(1)\n                crop_l = np.random.randint(low=2 ** (self.temporal_unit + 1), high=ts_l+1)\n                crop_left = np.random.randint(ts_l - crop_l + 1)\n                crop_right = crop_left + crop_l\n                crop_eleft = np.random.randint(crop_left + 1)\n                crop_eright = np.random.randint(low=crop_right, high=ts_l + 1)\n                crop_offset = np.random.randint(low=-crop_eleft, high=ts_l - crop_eright + 1, size=x.size(0))\n                \n                optimizer.zero_grad()\n                \n                out1 = self._net(take_per_row(x, crop_offset + crop_eleft, crop_right - crop_eleft))\n                out1 = out1[:, -crop_l:]\n                \n                out2 = self._net(take_per_row(x, crop_offset + crop_left, crop_eright - crop_left))\n                out2 = out2[:, :crop_l]\n                \n                loss = hierarchical_contrastive_loss(\n                    out1,\n                    out2,\n                    temporal_unit=self.temporal_unit\n                )\n                \n                loss.backward()\n                optimizer.step()\n                self.net.update_parameters(self._net)\n                    \n                cum_loss += loss.item()\n                n_epoch_iters += 1\n                \n                self.n_iters += 1\n                \n                if self.after_iter_callback is not None:\n                    self.after_iter_callback(self, loss.item())\n            \n            if interrupted:\n                break\n            \n            cum_loss /= n_epoch_iters\n            loss_log.append(cum_loss)\n            if verbose:\n                print(f\"Epoch #{self.n_epochs}: 
loss={cum_loss}\")\n            self.n_epochs += 1\n            \n            if self.after_epoch_callback is not None:\n                self.after_epoch_callback(self, cum_loss)\n            \n        return loss_log\n    \n    def _eval_with_pooling(self, x, mask=None, slicing=None, encoding_window=None):\n        out = self.net(x.to(self.device, non_blocking=True), mask)\n        if encoding_window == 'full_series':\n            if slicing is not None:\n                out = out[:, slicing]\n            out = F.max_pool1d(\n                out.transpose(1, 2),\n                kernel_size = out.size(1),\n            ).transpose(1, 2)\n            \n        elif isinstance(encoding_window, int):\n            out = F.max_pool1d(\n                out.transpose(1, 2),\n                kernel_size = encoding_window,\n                stride = 1,\n                padding = encoding_window // 2\n            ).transpose(1, 2)\n            if encoding_window % 2 == 0:\n                out = out[:, :-1]\n            if slicing is not None:\n                out = out[:, slicing]\n            \n        elif encoding_window == 'multiscale':\n            p = 0\n            reprs = []\n            while (1 << p) + 1 < out.size(1):\n                t_out = F.max_pool1d(\n                    out.transpose(1, 2),\n                    kernel_size = (1 << (p + 1)) + 1,\n                    stride = 1,\n                    padding = 1 << p\n                ).transpose(1, 2)\n                if slicing is not None:\n                    t_out = t_out[:, slicing]\n                reprs.append(t_out)\n                p += 1\n            out = torch.cat(reprs, dim=-1)\n            \n        else:\n            if slicing is not None:\n                out = out[:, slicing]\n            \n        return out.cpu()\n    \n    def encode(self, data, mask=None, encoding_window=None, casual=False, sliding_length=None, sliding_padding=0, batch_size=None):\n        ''' Compute representations using 
the model.\n        \n        Args:\n            data (numpy.ndarray): This should have a shape of (n_instance, n_timestamps, n_features). All missing data should be set to NaN.\n            mask (str): The mask used by encoder can be specified with this parameter. This can be set to 'binomial', 'continuous', 'all_true', 'all_false' or 'mask_last'.\n            encoding_window (Union[str, int]): When this param is specified, the computed representation would the max pooling over this window. This can be set to 'full_series', 'multiscale' or an integer specifying the pooling kernel size.\n            casual (bool): When this param is set to True, the future informations would not be encoded into representation of each timestamp.\n            sliding_length (Union[int, NoneType]): The length of sliding window. When this param is specified, a sliding inference would be applied on the time series.\n            sliding_padding (int): This param specifies the contextual data length used for inference every sliding windows.\n            batch_size (Union[int, NoneType]): The batch size used for inference. 
If not specified, this would be the same batch size as training.\n            \n        Returns:\n            repr: The representations for data.\n        '''\n        assert self.net is not None, 'please train or load a net first'\n        assert data.ndim == 3\n        if batch_size is None:\n            batch_size = self.batch_size\n        n_samples, ts_l, _ = data.shape\n\n        org_training = self.net.training\n        self.net.eval()\n        \n        dataset = TensorDataset(torch.from_numpy(data).to(torch.float))\n        loader = DataLoader(dataset, batch_size=batch_size)\n        \n        with torch.no_grad():\n            output = []\n            for batch in loader:\n                x = batch[0]\n                if sliding_length is not None:\n                    reprs = []\n                    if n_samples < batch_size:\n                        calc_buffer = []\n                        calc_buffer_l = 0\n                    for i in range(0, ts_l, sliding_length):\n                        l = i - sliding_padding\n                        r = i + sliding_length + (sliding_padding if not casual else 0)\n                        x_sliding = torch_pad_nan(\n                            x[:, max(l, 0) : min(r, ts_l)],\n                            left=-l if l<0 else 0,\n                            right=r-ts_l if r>ts_l else 0,\n                            dim=1\n                        )\n                        if n_samples < batch_size:\n                            if calc_buffer_l + n_samples > batch_size:\n                                out = self._eval_with_pooling(\n                                    torch.cat(calc_buffer, dim=0),\n                                    mask,\n                                    slicing=slice(sliding_padding, sliding_padding+sliding_length),\n                                    encoding_window=encoding_window\n                                )\n                                reprs += torch.split(out, n_samples)\n    
                            calc_buffer = []\n                                calc_buffer_l = 0\n                            calc_buffer.append(x_sliding)\n                            calc_buffer_l += n_samples\n                        else:\n                            out = self._eval_with_pooling(\n                                x_sliding,\n                                mask,\n                                slicing=slice(sliding_padding, sliding_padding+sliding_length),\n                                encoding_window=encoding_window\n                            )\n                            reprs.append(out)\n\n                    if n_samples < batch_size:\n                        if calc_buffer_l > 0:\n                            out = self._eval_with_pooling(\n                                torch.cat(calc_buffer, dim=0),\n                                mask,\n                                slicing=slice(sliding_padding, sliding_padding+sliding_length),\n                                encoding_window=encoding_window\n                            )\n                            reprs += torch.split(out, n_samples)\n                            calc_buffer = []\n                            calc_buffer_l = 0\n                    \n                    out = torch.cat(reprs, dim=1)\n                    if encoding_window == 'full_series':\n                        out = F.max_pool1d(\n                            out.transpose(1, 2).contiguous(),\n                            kernel_size = out.size(1),\n                        ).squeeze(1)\n                else:\n                    out = self._eval_with_pooling(x, mask, encoding_window=encoding_window)\n                    if encoding_window == 'full_series':\n                        out = out.squeeze(1)\n                        \n                output.append(out)\n                \n            output = torch.cat(output, dim=0)\n            \n        self.net.train(org_training)\n        return 
output.numpy()\n    \n    def save(self, fn):\n        ''' Save the model to a file.\n        \n        Args:\n            fn (str): filename.\n        '''\n        torch.save(self.net.state_dict(), fn)\n    \n    def load(self, fn):\n        ''' Load the model from a file.\n        \n        Args:\n            fn (str): filename.\n        '''\n        state_dict = torch.load(fn, map_location=self.device)\n        self.net.load_state_dict(state_dict)\n    \n"
  },
  {
    "path": "ts_forecasting_methods/ts2vec/utils.py",
    "content": "import os\nimport numpy as np\nimport pickle\nimport torch\nimport random\nfrom datetime import datetime\n\ndef pkl_save(name, var):\n    with open(name, 'wb') as f:\n        pickle.dump(var, f)\n\ndef pkl_load(name):\n    with open(name, 'rb') as f:\n        return pickle.load(f)\n    \ndef torch_pad_nan(arr, left=0, right=0, dim=0):\n    if left > 0:\n        padshape = list(arr.shape)\n        padshape[dim] = left\n        arr = torch.cat((torch.full(padshape, np.nan), arr), dim=dim)\n    if right > 0:\n        padshape = list(arr.shape)\n        padshape[dim] = right\n        arr = torch.cat((arr, torch.full(padshape, np.nan)), dim=dim)\n    return arr\n    \ndef pad_nan_to_target(array, target_length, axis=0, both_side=False):\n    assert array.dtype in [np.float16, np.float32, np.float64]\n    pad_size = target_length - array.shape[axis]\n    if pad_size <= 0:\n        return array\n    npad = [(0, 0)] * array.ndim\n    if both_side:\n        npad[axis] = (pad_size // 2, pad_size - pad_size//2)\n    else:\n        npad[axis] = (0, pad_size)\n    return np.pad(array, pad_width=npad, mode='constant', constant_values=np.nan)\n\ndef split_with_nan(x, sections, axis=0):\n    assert x.dtype in [np.float16, np.float32, np.float64]\n    arrs = np.array_split(x, sections, axis=axis)\n    target_length = arrs[0].shape[axis]\n    for i in range(len(arrs)):\n        arrs[i] = pad_nan_to_target(arrs[i], target_length, axis=axis)\n    return arrs\n\ndef take_per_row(A, indx, num_elem):\n    all_indx = indx[:,None] + np.arange(num_elem)\n    return A[torch.arange(all_indx.shape[0])[:,None], all_indx]\n\ndef centerize_vary_length_series(x):\n    prefix_zeros = np.argmax(~np.isnan(x).all(axis=-1), axis=1)\n    suffix_zeros = np.argmax(~np.isnan(x[:, ::-1]).all(axis=-1), axis=1)\n    offset = (prefix_zeros + suffix_zeros) // 2 - prefix_zeros\n    rows, column_indices = np.ogrid[:x.shape[0], :x.shape[1]]\n    offset[offset < 0] += x.shape[1]\n    column_indices 
= column_indices - offset[:, np.newaxis]\n    return x[rows, column_indices]\n\ndef data_dropout(arr, p):\n    B, T = arr.shape[0], arr.shape[1]\n    mask = np.full(B*T, False, dtype=np.bool)\n    ele_sel = np.random.choice(\n        B*T,\n        size=int(B*T*p),\n        replace=False\n    )\n    mask[ele_sel] = True\n    res = arr.copy()\n    res[mask.reshape(B, T)] = np.nan\n    return res\n\ndef name_with_datetime(prefix='default'):\n    now = datetime.now()\n    return prefix + '_' + now.strftime(\"%Y%m%d_%H%M%S\")\n\ndef init_dl_program(\n    device_name,\n    seed=None,\n    use_cudnn=True,\n    deterministic=False,\n    benchmark=False,\n    use_tf32=False,\n    max_threads=None\n):\n    import torch\n    if max_threads is not None:\n        torch.set_num_threads(max_threads)  # intraop\n        if torch.get_num_interop_threads() != max_threads:\n            torch.set_num_interop_threads(max_threads)  # interop\n        try:\n            import mkl\n        except:\n            pass\n        else:\n            mkl.set_num_threads(max_threads)\n        \n    if seed is not None:\n        random.seed(seed)\n        seed += 1\n        np.random.seed(seed)\n        seed += 1\n        torch.manual_seed(seed)\n        \n    if isinstance(device_name, (str, int)):\n        device_name = [device_name]\n    \n    devices = []\n    for t in reversed(device_name):\n        t_device = torch.device(t)\n        devices.append(t_device)\n        if t_device.type == 'cuda':\n            assert torch.cuda.is_available()\n            torch.cuda.set_device(t_device)\n            if seed is not None:\n                seed += 1\n                torch.cuda.manual_seed(seed)\n    devices.reverse()\n    torch.backends.cudnn.enabled = use_cudnn\n    torch.backends.cudnn.deterministic = deterministic\n    torch.backends.cudnn.benchmark = benchmark\n    \n    if hasattr(torch.backends.cudnn, 'allow_tf32'):\n        torch.backends.cudnn.allow_tf32 = use_tf32\n        
torch.backends.cuda.matmul.allow_tf32 = use_tf32\n        \n    return devices if len(devices) > 1 else devices[0]\n\n"
  }
]